From a5705e0698d59d8bdb0b8ef3404620492dedef99 Mon Sep 17 00:00:00 2001
From: Enxebre
Date: Mon, 29 Apr 2019 15:18:16 +0200
Subject: [PATCH] Revendor to accommodate the CRDs for
 https://github.com/openshift/cluster-api/pull/34

---
 Gopkg.lock                                    |   8 +-
 .../cluster-api-actuator-pkg/Gopkg.lock       |   4 +-
 .../cluster-api-actuator-pkg/Makefile         |  25 ++-
 .../pkg/e2e/autoscaler/autoscaler.go          | 183 +++++++++---------
 .../pkg/manifests/manifests.go                |  26 +--
 .../pkg/apis/machine/v1beta1/common_types.go  |  26 +--
 .../pkg/apis/machine/v1beta1/machine_types.go |  75 +++----
 .../machine/v1beta1/zz_generated.deepcopy.go  |  81 --------
 8 files changed, 143 insertions(+), 285 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 0b64d0c4fe..3db87d5d2e 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -413,7 +413,7 @@
 [[projects]]
   branch = "openshift-4.0-cluster-api-0.0.0-alpha.4"
-  digest = "1:254e6bfaccd0d24b32e3886403467173cc70f111c2ab23de37307b1eacca996b"
+  digest = "1:fd7bc783f3b4e5894248bad9f17e9972a93b70b46b1f45dd154dee56eaf5e111"
   name = "github.com/openshift/cluster-api"
   packages = [
     "pkg/apis/cluster/common",
@@ -437,11 +437,11 @@
     "pkg/util",
   ]
   pruneopts = "NUT"
-  revision = "34fb0d070a2ed877b1df06a2b79bbc4f92bcad2d"
+  revision = "d8958b539e331bf5ebb056b7f872541e13fb8e01"
 
 [[projects]]
   branch = "master"
-  digest = "1:7cbcae314ea9375bebbecfc8b520bbce9786f9c4d1b42824f2a1613695e3faad"
+  digest = "1:c51e502153acd32f728210817c40fc8e1d6851d2b64a0be0b5c0186ee32218ac"
   name = "github.com/openshift/cluster-api-actuator-pkg"
   packages = [
     "pkg/e2e",
@@ -454,7 +454,7 @@
     "pkg/types",
   ]
   pruneopts = ""
-  revision = "59a034672129ff21181ac38871a47dabbc900c7f"
+  revision = "286578aa9fd0510b7c37fbd4b4ab9c0eab7b03eb"
 
 [[projects]]
   branch = "master"
diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock
index ab96cbd654..13955cb251 100644
--- a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock
+++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock
@@ -372,7 +372,7 @@
 [[projects]]
   branch = "openshift-4.0-cluster-api-0.0.0-alpha.4"
-  digest = "1:dbd28c2d6b9c58d0dfbfaa7cb2efce07fc36f812c2eb52197856d40d91a635bb"
+  digest = "1:70aff930abe993c5b2c11128f615ac7ad529e99d82ee3fb13e41e2afac30a38c"
   name = "github.com/openshift/cluster-api"
   packages = [
     "pkg/apis/cluster/common",
@@ -388,7 +388,7 @@
     "pkg/util",
   ]
   pruneopts = ""
-  revision = "5c833e464afbd8a9b3aca3f1d09943478bd90521"
+  revision = "d8958b539e331bf5ebb056b7f872541e13fb8e01"
 
 [[projects]]
   branch = "master"
diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile b/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile
index 2d4952d5fd..dbf8725c08 100644
--- a/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile
+++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile
@@ -45,22 +45,21 @@ build-e2e:
 
 .PHONY: test-e2e
 test-e2e: ## Run openshift specific e2e test
-	go test -timeout 90m \
-		-v github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \
-		-kubeconfig $${KUBECONFIG:-~/.kube/config} \
-		-machine-api-namespace $${NAMESPACE:-openshift-machine-api} \
-		-ginkgo.v \
-		-args -v 5 -logtostderr true
+	# Run operator tests first to preserve logs for troubleshooting test
+	# failures and flakes.
+	# Feature:Operator tests remove deployments, thus losing all the logs
+	# previously acquired.
+	hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators"
+	hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators"
 
 .PHONY: k8s-e2e
 k8s-e2e: ## Run k8s specific e2e test
-	go test -timeout 30m \
-		-v github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \
-		-kubeconfig $${KUBECONFIG:-~/.kube/config} \
-		-machine-api-namespace $${NAMESPACE:-kube-system} \
-		-ginkgo.v \
-		-args -v 5 -logtostderr true
-
+	# Run operator tests first to preserve logs for troubleshooting test
+	# failures and flakes.
+	# Feature:Operator tests remove deployments, thus losing all the logs
+	# previously acquired.
+	NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators"
+	NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators"
 
 .PHONY: help
 help:
diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go
index 3fd410bdf5..99c83f0105 100644
--- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go
+++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go
@@ -178,13 +178,87 @@ func machineAutoscalerResource(targetMachineSet *mapiv1beta1.MachineSet, minRepl
 var _ = g.Describe("[Feature:Machines] Autoscaler should", func() {
 	defer g.GinkgoRecover()
 
+	scaleUpFunc := func(client runtimeclient.Client, targetMachineSet mapiv1beta1.MachineSet, nodeTestLabel string, initialNumberOfReplicas int32, expectedReplicas int32) {
+		g.By(fmt.Sprintf("Creating MachineAutoscaler object for targetMachineSet %q", targetMachineSet.Name))
+		machineAutoscaler := machineAutoscalerResource(&targetMachineSet, 1, expectedReplicas)
+		err := client.Create(context.TODO(), machineAutoscaler)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		g.By(fmt.Sprintf("Waiting for cluster to scale out number of replicas of targetMachineSet %q", targetMachineSet.Name))
+		err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) {
+			ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet.Name)
+			if err != nil {
+				glog.Errorf("error getting machineset object: %v, retrying...", err)
+				return false, nil
+			}
+			glog.Infof("MachineSet %q. Initial number of replicas: %d. Current number of replicas: %d", targetMachineSet.Name, initialNumberOfReplicas, pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas))
+			return pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) == expectedReplicas, nil
+		})
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		g.By(fmt.Sprintf("Waiting for cluster to scale up nodes for targetMachineSet %q", targetMachineSet.Name))
+		err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) {
+			scaledMachines := mapiv1beta1.MachineList{}
+			if err := client.List(context.TODO(), runtimeclient.MatchingLabels(targetMachineSet.Spec.Selector.MatchLabels), &scaledMachines); err != nil {
+				glog.Errorf("Error querying api for machineset object: %v, retrying...", err)
+				return false, nil
+			}
+
+			// get all linked nodes and label them
+			nodeCounter := 0
+			for _, machine := range scaledMachines.Items {
+				if machine.Status.NodeRef == nil {
+					glog.Errorf("Machine %q does not have node reference set", machine.Name)
+					return false, nil
+				}
+				glog.Infof("Machine %q is linked to node %q", machine.Name, machine.Status.NodeRef.Name)
+				nodeCounter++
+			}
+
+			glog.Infof("Expecting %d nodes. Current number of nodes in the group: %d", expectedReplicas, nodeCounter)
+			return nodeCounter == int(expectedReplicas), nil
+		})
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		g.By(fmt.Sprintf("Labeling all nodes belonging to the machineset %q (after scale up phase)", targetMachineSet.Name))
+		err = labelMachineSetNodes(client, &targetMachineSet, nodeTestLabel)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+
+	scaleDownFunc := func(client runtimeclient.Client, targetMachineSet string, nodeTestLabel string, initialNumberOfReplicas int32) {
+		g.By(fmt.Sprintf("Waiting for machineset %q to have at most initial number of replicas", targetMachineSet))
+		err := wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) {
+			ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet)
+			if err != nil {
+				glog.Errorf("error getting machineset object: %v, retrying...", err)
+				return false, nil
+			}
+			msReplicas := pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas)
+			glog.Infof("Initial number of replicas: %d. Current number of replicas: %d", initialNumberOfReplicas, msReplicas)
+			if msReplicas > initialNumberOfReplicas {
+				return false, nil
+			}
+
+			// Make sure all scaled down nodes are really gone (so they don't affect tests to be run next)
+			scaledNodes := corev1.NodeList{}
+			if err := client.List(context.TODO(), runtimeclient.MatchingLabels(map[string]string{nodeTestLabel: ""}), &scaledNodes); err != nil {
+				glog.Errorf("Error querying api for node objects: %v, retrying...", err)
+				return false, nil
+			}
+			scaledNodesLen := int32(len(scaledNodes.Items))
+			glog.Infof("Current number of replicas: %d. Current number of nodes: %d", msReplicas, scaledNodesLen)
+			return scaledNodesLen <= msReplicas && scaledNodesLen <= initialNumberOfReplicas, nil
+		})
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
 
 	g.It("scale out", func() {
 		var err error
 		client, err := e2e.LoadClient()
 		o.Expect(err).NotTo(o.HaveOccurred())
 
-		nodeTestLabel := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID()))
+		nodeTestLabel0 := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID()))
+		nodeTestLabel1 := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID()))
 
 		// We want to clean up these objects on any subsequent error.
 		defer func() {
@@ -213,19 +287,25 @@ var _ = g.Describe("[Feature:Machines] Autoscaler should", func() {
 			}
 		}()
 
-		g.By("Getint target machineSet")
+		g.By("Getting target machineSet")
 		machinesets, err := e2e.GetMachineSets(context.TODO(), client)
 		o.Expect(err).NotTo(o.HaveOccurred())
-		o.Expect(len(machinesets)).To(o.BeNumerically(">", 0))
+		o.Expect(len(machinesets)).To(o.BeNumerically(">", 1))
 
-		targetMachineSet := machinesets[0]
-		glog.Infof("Target machineSet %s", targetMachineSet.Name)
+		targetMachineSet0 := machinesets[0]
+		glog.Infof("Target machineSet0 %q", targetMachineSet0.Name)
+		targetMachineSet1 := machinesets[1]
+		glog.Infof("Target machineSet1 %q", targetMachineSet1.Name)
 
 		// When we add support for machineDeployments on the installer, cluster-autoscaler and cluster-autoscaler-operator
 		// we need to test against deployments instead so we skip this test.
-		if ownerReferences := targetMachineSet.GetOwnerReferences(); len(ownerReferences) > 0 {
+		if ownerReferences0 := targetMachineSet0.GetOwnerReferences(); len(ownerReferences0) > 0 {
 			// glog.Infof("MachineSet %s is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet.Name)
-			g.Skip(fmt.Sprintf("MachineSet %s is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet.Name))
+			g.Skip(fmt.Sprintf("MachineSet %q is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet0.Name))
+		}
+
+		if ownerReferences1 := targetMachineSet1.GetOwnerReferences(); len(ownerReferences1) > 0 {
+			g.Skip(fmt.Sprintf("MachineSet %q is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet1.Name))
 		}
 
 		g.By("Create ClusterAutoscaler object")
@@ -233,68 +313,17 @@ var _ = g.Describe("[Feature:Machines] Autoscaler should", func() {
 		err = client.Create(context.TODO(), clusterAutoscaler)
 		o.Expect(err).NotTo(o.HaveOccurred())
 
-		initialNumberOfReplicas := pointer.Int32PtrDerefOr(targetMachineSet.Spec.Replicas, e2e.DefaultMachineSetReplicas)
-
-		g.By("Creating MachineAutoscaler objects")
-		machineAutoscaler := machineAutoscalerResource(&targetMachineSet, 1, initialNumberOfReplicas+1)
-		err = client.Create(context.TODO(), machineAutoscaler)
-		o.Expect(err).NotTo(o.HaveOccurred())
-
-		g.By("Labeling all nodes belonging to the machineset (before scale up phase)")
-		err = labelMachineSetNodes(client, &targetMachineSet, nodeTestLabel)
-		o.Expect(err).NotTo(o.HaveOccurred())
-
-		glog.Info("Get nodeList")
-		nodeList := corev1.NodeList{}
-		err = client.List(context.TODO(), runtimeclient.MatchingLabels(map[string]string{nodeTestLabel: ""}), &nodeList)
-		o.Expect(err).NotTo(o.HaveOccurred())
-
-		nodeGroupInitialTotalNodes := len(nodeList.Items)
-		glog.Infof("Cluster initial number of nodes in node group %v is %d", targetMachineSet.Name, nodeGroupInitialTotalNodes)
+		initialNumberOfReplicas0 := pointer.Int32PtrDerefOr(targetMachineSet0.Spec.Replicas, e2e.DefaultMachineSetReplicas)
+		initialNumberOfReplicas1 := pointer.Int32PtrDerefOr(targetMachineSet1.Spec.Replicas, e2e.DefaultMachineSetReplicas)
+		glog.Infof("initialNumberOfReplicas0 %d, initialNumberOfReplicas1 %d", initialNumberOfReplicas0, initialNumberOfReplicas1)
+		o.Expect(initialNumberOfReplicas0).To(o.BeNumerically("==", initialNumberOfReplicas1))
 
 		g.By("Creating workload")
 		err = client.Create(context.TODO(), newWorkLoad())
 		o.Expect(err).NotTo(o.HaveOccurred())
 
-		g.By("Waiting for cluster to scale out number of replicas")
-		err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) {
-			ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet.Name)
-			if err != nil {
-				glog.Errorf("error getting machineset object: %v, retrying...", err)
-				return false, nil
-			}
-			glog.Infof("MachineSet %s. Initial number of replicas: %d. Current number of replicas: %d", targetMachineSet.Name, initialNumberOfReplicas, pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas))
-			return pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) > initialNumberOfReplicas, nil
-		})
-		o.Expect(err).NotTo(o.HaveOccurred())
-
-		g.By("Waiting for cluster to scale up nodes")
-		err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) {
-			scaledMachines := mapiv1beta1.MachineList{}
-			if err := client.List(context.TODO(), runtimeclient.MatchingLabels(targetMachineSet.Spec.Selector.MatchLabels), &scaledMachines); err != nil {
-				glog.Errorf("Error querying api for machineset object: %v, retrying...", err)
-				return false, nil
-			}
-
-			// get all linked nodes and label them
-			nodeCounter := 0
-			for _, machine := range scaledMachines.Items {
-				if machine.Status.NodeRef == nil {
-					glog.Errorf("Machine %q does not have node reference set", machine.Name)
-					return false, nil
-				}
-				glog.Infof("Machine %q is linked to node %q", machine.Name, machine.Status.NodeRef.Name)
-				nodeCounter++
-			}
-
-			glog.Infof("Expecting at least one new node to come up. Initial number of node group nodes: %d. Current number of nodes in the group: %d", nodeGroupInitialTotalNodes, nodeCounter)
-			return nodeCounter > nodeGroupInitialTotalNodes, nil
-		})
-		o.Expect(err).NotTo(o.HaveOccurred())
-
-		g.By("Labeling all nodes belonging to the machineset (after scale up phase)")
-		err = labelMachineSetNodes(client, &targetMachineSet, nodeTestLabel)
-		o.Expect(err).NotTo(o.HaveOccurred())
+		scaleUpFunc(client, targetMachineSet1, nodeTestLabel1, initialNumberOfReplicas1, initialNumberOfReplicas1+2)
+		scaleUpFunc(client, targetMachineSet0, nodeTestLabel0, initialNumberOfReplicas0, initialNumberOfReplicas1+1)
 
 		// Delete workload
 		g.By("Deleting workload")
@@ -305,30 +334,8 @@ var _ = g.Describe("[Feature:Machines] Autoscaler should", func() {
 		// start to scale down the unneeded nodes. We wait for that
 		// condition; if successful we assert that (a smoke test of)
 		// scale down is functional.
-		g.By("Waiting for cluster to have at most initial number of replicas")
-		err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) {
-			ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet.Name)
-			if err != nil {
-				glog.Errorf("error getting machineset object: %v, retrying...", err)
-				return false, nil
-			}
-			msReplicas := pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas)
-			glog.Infof("Initial number of replicas: %d. Current number of replicas: %d", initialNumberOfReplicas, msReplicas)
-			if msReplicas > initialNumberOfReplicas {
-				return false, nil
-			}
-
-			// Make sure all scaled down nodes are really gone (so they don't affect tests to be run next)
-			scaledNodes := corev1.NodeList{}
-			if err := client.List(context.TODO(), runtimeclient.MatchingLabels(map[string]string{nodeTestLabel: ""}), &scaledNodes); err != nil {
-				glog.Errorf("Error querying api for node objects: %v, retrying...", err)
-				return false, nil
-			}
-			scaledNodesLen := int32(len(scaledNodes.Items))
-			glog.Infof("Current number of replicas: %d. Current number of nodes: %d", msReplicas, scaledNodesLen)
-			return scaledNodesLen <= msReplicas && scaledNodesLen <= initialNumberOfReplicas, nil
-		})
-		o.Expect(err).NotTo(o.HaveOccurred())
+		scaleDownFunc(client, targetMachineSet0.Name, nodeTestLabel0, initialNumberOfReplicas0)
+		scaleDownFunc(client, targetMachineSet1.Name, nodeTestLabel1, initialNumberOfReplicas1)
 	})
 })
diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go
index d5ead12d4d..dceb3efc16 100644
--- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go
+++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go
@@ -948,7 +948,7 @@ func TestingMachine(clusterID string, namespace string, providerSpec machinev1be
 			Namespace: namespace,
 			GenerateName: "vs-master-",
 			Labels: map[string]string{
-				"sigs.k8s.io/cluster-api-cluster": clusterID,
+				"machine.openshift.io/cluster-api-cluster": clusterID,
 			},
 		},
 		Spec: machinev1beta1.MachineSpec{
@@ -958,10 +958,6 @@ func TestingMachine(clusterID string, namespace string, providerSpec machinev1be
 				},
 			},
 			ProviderSpec: providerSpec,
-			Versions: machinev1beta1.MachineVersionInfo{
-				Kubelet: "1.10.1",
-				ControlPlane: "1.10.1",
-			},
 		},
 	}
 
@@ -976,15 +972,11 @@ func MasterMachine(clusterID, namespace string, providerSpec machinev1beta1.Prov
 			Namespace: namespace,
 			GenerateName: "vs-master-",
 			Labels: map[string]string{
-				"sigs.k8s.io/cluster-api-cluster": clusterID,
+				"machine.openshift.io/cluster-api-cluster": clusterID,
 			},
 		},
 		Spec: machinev1beta1.MachineSpec{
 			ProviderSpec: providerSpec,
-			Versions: machinev1beta1.MachineVersionInfo{
-				Kubelet: "1.10.1",
-				ControlPlane: "1.10.1",
-			},
 		},
 	}
 
@@ -1050,14 +1042,14 @@ func WorkerMachineSet(clusterID, namespace string, providerSpec machinev1beta1.P
 			Namespace: namespace,
 			GenerateName: clusterID + "-worker-machine-" + randomUUID[:6] + "-",
 			Labels: map[string]string{
-				"sigs.k8s.io/cluster-api-cluster": clusterID,
+				"machine.openshift.io/cluster-api-cluster": clusterID,
 			},
 		},
 		Spec: machinev1beta1.MachineSetSpec{
 			Selector: metav1.LabelSelector{
 				MatchLabels: map[string]string{
-					"sigs.k8s.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6],
-					"sigs.k8s.io/cluster-api-cluster": clusterID,
+					"machine.openshift.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6],
+					"machine.openshift.io/cluster-api-cluster": clusterID,
 				},
 			},
 			Replicas: &replicas,
@@ -1065,8 +1057,8 @@ func WorkerMachineSet(clusterID, namespace string, providerSpec machinev1beta1.P
 				ObjectMeta: metav1.ObjectMeta{
 					GenerateName: clusterID + "-worker-machine-" + randomUUID[:6] + "-",
 					Labels: map[string]string{
-						"sigs.k8s.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6],
-						"sigs.k8s.io/cluster-api-cluster": clusterID,
+						"machine.openshift.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6],
+						"machine.openshift.io/cluster-api-cluster": clusterID,
 					},
 				},
 				Spec: machinev1beta1.MachineSpec{
@@ -1076,10 +1068,6 @@ func WorkerMachineSet(clusterID, namespace string, providerSpec machinev1beta1.P
 						},
 					},
 					ProviderSpec: providerSpec,
-					Versions: machinev1beta1.MachineVersionInfo{
-						Kubelet: "1.10.1",
-						ControlPlane: "1.10.1",
-					},
 				},
 			},
 		},
diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/common_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/common_types.go
index 7884754101..5ee990c9f2 100644
--- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/common_types.go
+++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/common_types.go
@@ -17,8 +17,7 @@ limitations under the License.
 package v1beta1
 
 import (
-	corev1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // ProviderSpec defines the configuration to use during node creation.
@@ -32,27 +31,4 @@ type ProviderSpec struct {
 	// field, akin to component config.
 	// +optional
 	Value *runtime.RawExtension `json:"value,omitempty"`
-
-	// Source for the provider configuration. Cannot be used if value is
-	// not empty.
-	// +optional
-	ValueFrom *ProviderSpecSource `json:"valueFrom,omitempty"`
-}
-
-// ProviderSpecSource represents a source for the provider-specific
-// resource configuration.
-type ProviderSpecSource struct {
-	// The machine class from which the provider config should be sourced.
-	// +optional
-	MachineClass *MachineClassRef `json:"machineClass,omitempty"`
-}
-
-// MachineClassRef is a reference to the MachineClass object. Controllers should find the right MachineClass using this reference.
-type MachineClassRef struct {
-	// +optional
-	*corev1.ObjectReference `json:",inline"`
-
-	// Provider is the name of the cloud-provider which MachineClass is intended for.
-	// +optional
-	Provider string `json:"provider,omitempty"`
 }
diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go
index e7fbb4b7fe..cab20ee095 100644
--- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go
+++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go
@@ -17,9 +17,12 @@ limitations under the License.
 package v1beta1
 
 import (
+	"fmt"
+
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/validation/field"
 
 	"github.com/openshift/cluster-api/pkg/apis/machine/common"
 )
@@ -30,6 +33,10 @@ const (
 
 	// MachineClusterLabelName is the label set on machines linked to a cluster.
 	MachineClusterLabelName = "cluster.k8s.io/cluster-name"
+
+	// MachineClusterIDLabel is the label that a machine must have to identify the
+	// cluster to which it belongs.
+	MachineClusterIDLabel = "machine.openshift.io/cluster-api-cluster"
 )
 
 // +genclient
@@ -68,23 +75,6 @@ type MachineSpec struct {
 	// +optional
 	ProviderSpec ProviderSpec `json:"providerSpec"`
 
-	// Versions of key software to use. This field is optional at cluster
-	// creation time, and omitting the field indicates that the cluster
-	// installation tool should select defaults for the user. These
-	// defaults may differ based on the cluster installer, but the tool
-	// should populate the values it uses when persisting Machine objects.
-	// A Machine spec missing this field at runtime is invalid.
-	// +optional
-	Versions MachineVersionInfo `json:"versions,omitempty"`
-
-	// ConfigSource is used to populate in the associated Node for dynamic kubelet config. This
-	// field already exists in Node, so any updates to it in the Machine
-	// spec will be automatically copied to the linked NodeRef from the
-	// status. The rest of dynamic kubelet config support should then work
-	// as-is.
-	// +optional
-	ConfigSource *corev1.NodeConfigSource `json:"configSource,omitempty"`
-
 	// ProviderID is the identification ID of the machine provided by the provider.
 	// This field must match the provider ID as seen on the node object corresponding to this machine.
 	// This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler
@@ -112,22 +102,6 @@ type MachineStatus struct {
 	// +optional
 	LastUpdated *metav1.Time `json:"lastUpdated,omitempty"`
 
-	// Versions specifies the current versions of software on the corresponding Node (if it
-	// exists). This is provided for a few reasons:
-	//
-	// 1) It is more convenient than checking the NodeRef, traversing it to
-	//    the Node, and finding the appropriate field in Node.Status.NodeInfo
-	//    (which uses different field names and formatting).
-	// 2) It removes some of the dependency on the structure of the Node,
-	//    so that if the structure of Node.Status.NodeInfo changes, only
-	//    machine controllers need to be updated, rather than every client
-	//    of the Machines API.
-	// 3) There is no other simple way to check the control plane
-	//    version. A client would have to connect directly to the apiserver
-	//    running on the target node in order to find out its version.
-	// +optional
-	Versions *MachineVersionInfo `json:"versions,omitempty"`
-
 	// ErrorReason will be set in the event that there is a terminal problem
 	// reconciling the Machine and will contain a succinct value suitable
 	// for machine interpretation.
@@ -177,14 +151,6 @@ type MachineStatus struct {
 	// +optional
 	Addresses []corev1.NodeAddress `json:"addresses,omitempty"`
 
-	// Conditions lists the conditions synced from the node conditions of the corresponding node-object.
-	// Machine-controller is responsible for keeping conditions up-to-date.
-	// MachineSet controller will be taking these conditions as a signal to decide if
-	// machine is healthy or needs to be replaced.
-	// Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition
-	// +optional
-	Conditions []corev1.NodeCondition `json:"conditions,omitempty"`
-
 	// LastOperation describes the last-operation performed by the machine-controller.
 	// This API should be useful as a history in terms of the latest operation performed on the
 	// specific machine. It should also convey the state of the latest-operation for example if
@@ -215,21 +181,24 @@ type LastOperation struct {
 	Type *string `json:"type,omitempty"`
 }
 
-/// [MachineStatus]
-
 /// [MachineVersionInfo]
-type MachineVersionInfo struct {
-	// Kubelet is the semantic version of kubelet to run
-	Kubelet string `json:"kubelet"`
-
-	// ControlPlane is the semantic version of the Kubernetes control plane to
-	// run. This should only be populated when the machine is a
-	// control plane.
-	// +optional
-	ControlPlane string `json:"controlPlane,omitempty"`
-}
+func (m *Machine) Validate() field.ErrorList {
+	errors := field.ErrorList{}
 
-/// [MachineVersionInfo]
+	// validate spec.labels
+	fldPath := field.NewPath("spec")
+	if m.Labels[MachineClusterIDLabel] == "" {
+		errors = append(errors, field.Invalid(fldPath.Child("labels"), m.Labels, fmt.Sprintf("missing %v label.", MachineClusterIDLabel)))
+	}
+
+	// validate provider config is set
+	if m.Spec.ProviderSpec.Value == nil {
+		errors = append(errors, field.Invalid(fldPath.Child("spec").Child("providerspec"), m.Spec.ProviderSpec, "value field must be set"))
+	}
+
+	return errors
+}
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go
index c7535569a8..ec66da1256 100644
--- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go
@@ -289,27 +289,6 @@ func (in *MachineClassList) DeepCopyObject() runtime.Object {
 	return nil
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MachineClassRef) DeepCopyInto(out *MachineClassRef) {
-	*out = *in
-	if in.ObjectReference != nil {
-		in, out := &in.ObjectReference, &out.ObjectReference
-		*out = new(v1.ObjectReference)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassRef.
-func (in *MachineClassRef) DeepCopy() *MachineClassRef {
-	if in == nil {
-		return nil
-	}
-	out := new(MachineClassRef)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) {
 	*out = *in
@@ -632,12 +611,6 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) {
 		}
 	}
 	in.ProviderSpec.DeepCopyInto(&out.ProviderSpec)
-	out.Versions = in.Versions
-	if in.ConfigSource != nil {
-		in, out := &in.ConfigSource, &out.ConfigSource
-		*out = new(v1.NodeConfigSource)
-		(*in).DeepCopyInto(*out)
-	}
 	if in.ProviderID != nil {
 		in, out := &in.ProviderID, &out.ProviderID
 		*out = new(string)
@@ -668,11 +641,6 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) {
 		in, out := &in.LastUpdated, &out.LastUpdated
 		*out = (*in).DeepCopy()
 	}
-	if in.Versions != nil {
-		in, out := &in.Versions, &out.Versions
-		*out = new(MachineVersionInfo)
-		**out = **in
-	}
 	if in.ErrorReason != nil {
 		in, out := &in.ErrorReason, &out.ErrorReason
 		*out = new(common.MachineStatusError)
@@ -693,13 +661,6 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) {
 		*out = make([]v1.NodeAddress, len(*in))
 		copy(*out, *in)
 	}
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]v1.NodeCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
 	if in.LastOperation != nil {
 		in, out := &in.LastOperation, &out.LastOperation
 		*out = new(LastOperation)
@@ -741,22 +702,6 @@ func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MachineVersionInfo) DeepCopyInto(out *MachineVersionInfo) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineVersionInfo.
-func (in *MachineVersionInfo) DeepCopy() *MachineVersionInfo {
-	if in == nil {
-		return nil
-	}
-	out := new(MachineVersionInfo)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NetworkRanges) DeepCopyInto(out *NetworkRanges) {
 	*out = *in
@@ -786,11 +731,6 @@ func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) {
 		*out = new(runtime.RawExtension)
 		(*in).DeepCopyInto(*out)
 	}
-	if in.ValueFrom != nil {
-		in, out := &in.ValueFrom, &out.ValueFrom
-		*out = new(ProviderSpecSource)
-		(*in).DeepCopyInto(*out)
-	}
 	return
 }
 
@@ -803,24 +743,3 @@ func (in *ProviderSpec) DeepCopy() *ProviderSpec {
 	in.DeepCopyInto(out)
 	return out
 }
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProviderSpecSource) DeepCopyInto(out *ProviderSpecSource) {
-	*out = *in
-	if in.MachineClass != nil {
-		in, out := &in.MachineClass, &out.MachineClass
-		*out = new(MachineClassRef)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpecSource.
-func (in *ProviderSpecSource) DeepCopy() *ProviderSpecSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ProviderSpecSource)
-	in.DeepCopyInto(out)
-	return out
-}
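
Usage note (not applied by git, for reviewers only): a minimal sketch of how a consumer of the revendored v1beta1 types might call the new Machine.Validate helper added in machine_types.go above. The machine object, its name, and the label value are hypothetical examples; Validate only enforces the two checks introduced by this revendor, namely the machine.openshift.io/cluster-api-cluster label and a non-nil providerSpec.value.

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Hypothetical machine; name and label value are illustrative only.
	machine := &machinev1beta1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-worker-0",
			Labels: map[string]string{
				// Required by Validate: the cluster ID label.
				machinev1beta1.MachineClusterIDLabel: "example-cluster",
			},
		},
		Spec: machinev1beta1.MachineSpec{
			ProviderSpec: machinev1beta1.ProviderSpec{
				// Required by Validate: a non-nil provider spec value.
				Value: &runtime.RawExtension{Raw: []byte(`{}`)},
			},
		},
	}

	// Validate returns a field.ErrorList; an empty list means both checks passed.
	if errs := machine.Validate(); len(errs) > 0 {
		fmt.Printf("invalid machine: %v\n", errs.ToAggregate())
		return
	}
	fmt.Println("machine is valid")
}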