From 1efc258b3c3e348bd72e594a1646544925217923 Mon Sep 17 00:00:00 2001
From: Andrew McDermott
Date: Mon, 2 Mar 2020 11:53:35 +0000
Subject: [PATCH 01/10] config/options: add KubeConfigPath

Access to this is required by cloudprovider/clusterapi.
---
 cluster-autoscaler/config/autoscaling_options.go | 2 ++
 cluster-autoscaler/main.go                       | 1 +
 2 files changed, 3 insertions(+)

diff --git a/cluster-autoscaler/config/autoscaling_options.go b/cluster-autoscaler/config/autoscaling_options.go
index 0850874b03e7..10d47173f820 100644
--- a/cluster-autoscaler/config/autoscaling_options.go
+++ b/cluster-autoscaler/config/autoscaling_options.go
@@ -137,4 +137,6 @@ type AutoscalingOptions struct {
 	IgnoredTaints []string
 	// AWSUseStaticInstanceList tells if AWS cloud provider use static instance type list or dynamically fetch from remote APIs.
 	AWSUseStaticInstanceList bool
+	// KubeConfigPath is the path to the kube configuration, if available
+	KubeConfigPath string
 }

diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go
index 7d1cc5e1177c..cf5f18bd9266 100644
--- a/cluster-autoscaler/main.go
+++ b/cluster-autoscaler/main.go
@@ -235,6 +235,7 @@ func createAutoscalingOptions() config.AutoscalingOptions {
 		Regional:                 *regional,
 		NewPodScaleUpDelay:       *newPodScaleUpDelay,
 		IgnoredTaints:            *ignoreTaintsFlag,
+		KubeConfigPath:           *kubeConfigFile,
 		NodeDeletionDelayTimeout: *nodeDeletionDelayTimeout,
 		AWSUseStaticInstanceList: *awsUseStaticInstanceList,
 	}

From b95eeb7b821030b9f50744e9bdc95f9ddf05b87b Mon Sep 17 00:00:00 2001
From: Andrew McDermott
Date: Mon, 2 Mar 2020 11:54:22 +0000
Subject: [PATCH 02/10] cloudprovider/builder: add clusterapi

Enable cloudprovider/clusterapi.
---
 .../cloudprovider/builder/builder_all.go        |  6 ++-
 .../builder/builder_clusterapi.go               | 42 +++++++++++++++++++
 2 files changed, 47 insertions(+), 1 deletion(-)
 create mode 100644 cluster-autoscaler/cloudprovider/builder/builder_clusterapi.go

diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go
index e5047a612d99..37fa63c847de 100644
--- a/cluster-autoscaler/cloudprovider/builder/builder_all.go
+++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go
@@ -1,4 +1,4 @@
-// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean
+// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean,!clusterapi

 /*
 Copyright 2018 The Kubernetes Authors.
@@ -24,6 +24,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/baiducloud"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/clusterapi"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/digitalocean"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/magnum"
@@ -40,6 +41,7 @@ var AvailableCloudProviders = []string{
 	cloudprovider.BaiducloudProviderName,
 	cloudprovider.MagnumProviderName,
 	cloudprovider.DigitalOceanProviderName,
+	clusterapi.ProviderName,
 }

 // DefaultCloudProvider is GCE.
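(Aside, not part of the patch: a minimal sketch of how a provider such as clusterapi can consume the KubeConfigPath option introduced in patch 01. It assumes only the standard client-go helpers; the buildRestConfig name is illustrative, not taken from the series.)

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// buildRestConfig returns a client-go REST config, preferring an explicit
// kubeconfig path and falling back to the in-cluster service-account
// configuration when the path is empty.
func buildRestConfig(kubeConfigPath string) (*rest.Config, error) {
	if kubeConfigPath != "" {
		return clientcmd.BuildConfigFromFlags("", kubeConfigPath)
	}
	return rest.InClusterConfig()
}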
@@ -63,6 +65,8 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro
 		return magnum.BuildMagnum(opts, do, rl)
 	case packet.ProviderName:
 		return packet.BuildPacket(opts, do, rl)
+	case clusterapi.ProviderName:
+		return clusterapi.BuildClusterAPI(opts, do, rl)
 	}
 	return nil
 }
diff --git a/cluster-autoscaler/cloudprovider/builder/builder_clusterapi.go b/cluster-autoscaler/cloudprovider/builder/builder_clusterapi.go
new file mode 100644
index 000000000000..d7e47829e70e
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/builder/builder_clusterapi.go
@@ -0,0 +1,42 @@
+// +build clusterapi
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/clusterapi"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
+)
+
+// AvailableCloudProviders supported by the cloud provider builder.
+var AvailableCloudProviders = []string{
+	clusterapi.ProviderName,
+}
+
+// DefaultCloudProvider for clusterapi-only build.
+const DefaultCloudProvider = clusterapi.ProviderName
+
+func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
+	switch opts.CloudProviderName {
+	case clusterapi.ProviderName:
+		return clusterapi.BuildClusterAPI(opts, do, rl)
+	}
+
+	return nil
+}

From 46bb9b4f29c80b6d3e305e8ac2a54266a70e3566 Mon Sep 17 00:00:00 2001
From: Andrew McDermott
Date: Mon, 2 Mar 2020 12:21:13 +0000
Subject: [PATCH 03/10] cloudprovider/clusterapi: new provider

This adds a new cloudprovider based on the cluster-api project:
https://github.com/kubernetes-sigs/cluster-api
---
 .../cloudprovider/clusterapi/OWNERS           |   8 +
 .../clusterapi/clusterapi_controller.go       | 607 +++++++++++
 .../clusterapi/clusterapi_controller_test.go  | 993 ++++++++++++++++++
 .../clusterapi/clusterapi_converters.go       | 188 ++++
 .../clusterapi_machinedeployment.go           | 137 +++
 .../clusterapi/clusterapi_machineset.go       | 122 +++
 .../clusterapi/clusterapi_nodegroup.go        | 257 +++++
 .../clusterapi/clusterapi_nodegroup_test.go   | 846 +++++++++++++++
 .../clusterapi/clusterapi_provider.go         | 173 +++
 .../clusterapi/clusterapi_provider_test.go    | 106 ++
 .../clusterapi/clusterapi_scalableresource.go |  49 +
 .../clusterapi/clusterapi_utils.go            | 145 +++
 .../clusterapi/clusterapi_utils_test.go       | 371 +++++++
 13 files changed, 4002 insertions(+)
 create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/OWNERS
 create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
 create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go
 create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go
 create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go
 create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go
 create mode 100644
cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go diff --git a/cluster-autoscaler/cloudprovider/clusterapi/OWNERS b/cluster-autoscaler/cloudprovider/clusterapi/OWNERS new file mode 100644 index 000000000000..cdf20c0b1525 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/OWNERS @@ -0,0 +1,8 @@ +approvers: +- frobware +- enxebre +- elmiko +reviewers: +- frobware +- enxebre +- elmiko diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go new file mode 100644 index 000000000000..7ecb5ab5b6aa --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go @@ -0,0 +1,607 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/informers" + kubeinformers "k8s.io/client-go/informers" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" + "k8s.io/utils/pointer" +) + +const ( + machineProviderIDIndex = "machineProviderIDIndex" + nodeProviderIDIndex = "nodeProviderIDIndex" +) + +// machineController watches for Nodes, Machines, MachineSets and +// MachineDeployments as they are added, updated and deleted on the +// cluster. Additionally, it adds indices to the node informers to +// satisfy lookup by node.Spec.ProviderID. 
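+// The cluster-api objects are handled as unstructured.Unstructured via
+// dynamic informers and converted with the helpers in
+// clusterapi_converters.go, so no generated cluster-api clientset is
+// required.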
+type machineController struct { + kubeInformerFactory kubeinformers.SharedInformerFactory + machineInformerFactory dynamicinformer.DynamicSharedInformerFactory + machineDeploymentInformer informers.GenericInformer + machineInformer informers.GenericInformer + machineSetInformer informers.GenericInformer + nodeInformer cache.SharedIndexInformer + dynamicclient dynamic.Interface + machineSetResource *schema.GroupVersionResource + machineResource *schema.GroupVersionResource + machineDeploymentResource *schema.GroupVersionResource +} + +type machineSetFilterFunc func(machineSet *MachineSet) error + +func indexMachineByProviderID(obj interface{}) ([]string, error) { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return nil, nil + } + + providerID, found, err := unstructured.NestedString(u.Object, "spec", "providerID") + if err != nil || !found { + return nil, nil + } + if providerID == "" { + return nil, nil + } + + return []string{providerID}, nil +} + +func indexNodeByProviderID(obj interface{}) ([]string, error) { + if node, ok := obj.(*corev1.Node); ok { + if node.Spec.ProviderID != "" { + return []string{node.Spec.ProviderID}, nil + } + return []string{}, nil + } + return []string{}, nil +} + +func (c *machineController) findMachine(id string) (*Machine, error) { + item, exists, err := c.machineInformer.Informer().GetStore().GetByKey(id) + if err != nil { + return nil, err + } + + if !exists { + return nil, nil + } + + u, ok := item.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("internal error; unexpected type: %T", item) + } + + machine := newMachineFromUnstructured(u.DeepCopy()) + if machine == nil { + return nil, nil + } + + return machine, nil +} + +func (c *machineController) findMachineDeployment(id string) (*MachineDeployment, error) { + item, exists, err := c.machineDeploymentInformer.Informer().GetStore().GetByKey(id) + if err != nil { + return nil, err + } + + if !exists { + return nil, nil + } + + u, ok := item.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("internal error; unexpected type: %T", item) + } + + machineDeployment := newMachineDeploymentFromUnstructured(u.DeepCopy()) + if machineDeployment == nil { + return nil, nil + } + + return machineDeployment, nil +} + +// findMachineOwner returns the machine set owner for machine, or nil +// if there is no owner. A DeepCopy() of the object is returned on +// success. +func (c *machineController) findMachineOwner(machine *Machine) (*MachineSet, error) { + machineOwnerRef := machineOwnerRef(machine) + if machineOwnerRef == nil { + return nil, nil + } + + store := c.machineSetInformer.Informer().GetStore() + item, exists, err := store.GetByKey(fmt.Sprintf("%s/%s", machine.Namespace, machineOwnerRef.Name)) + if err != nil { + return nil, err + } + if !exists { + return nil, nil + } + + u, ok := item.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("internal error; unexpected type: %T", item) + } + + u = u.DeepCopy() + machineSet := newMachineSetFromUnstructured(u) + if machineSet == nil { + return nil, nil + } + + if !machineIsOwnedByMachineSet(machine, machineSet) { + return nil, nil + } + + return machineSet, nil +} + +// run starts shared informers and waits for the informer cache to +// synchronize. 
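+// It returns an error if any of the caches fails to sync before stopCh
+// is closed.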
+func (c *machineController) run(stopCh <-chan struct{}) error {
+	c.kubeInformerFactory.Start(stopCh)
+	c.machineInformerFactory.Start(stopCh)
+
+	syncFuncs := []cache.InformerSynced{
+		c.nodeInformer.HasSynced,
+		c.machineInformer.Informer().HasSynced,
+		c.machineSetInformer.Informer().HasSynced,
+		c.machineDeploymentInformer.Informer().HasSynced,
+	}
+
+	klog.V(4).Infof("waiting for caches to sync")
+	if !cache.WaitForCacheSync(stopCh, syncFuncs...) {
+		return fmt.Errorf("syncing caches failed")
+	}
+
+	return nil
+}
+
+// findMachineByProviderID finds the machine matching providerID. A
+// DeepCopy() of the object is returned on success.
+func (c *machineController) findMachineByProviderID(providerID string) (*Machine, error) {
+	objs, err := c.machineInformer.Informer().GetIndexer().ByIndex(machineProviderIDIndex, providerID)
+	if err != nil {
+		return nil, err
+	}
+
+	switch n := len(objs); {
+	case n > 1:
+		return nil, fmt.Errorf("internal error; expected len==1, got %v", n)
+	case n == 1:
+		u, ok := objs[0].(*unstructured.Unstructured)
+		if !ok {
+			return nil, fmt.Errorf("internal error; unexpected type %T", objs[0])
+		}
+		machine := newMachineFromUnstructured(u.DeepCopy())
+		if machine != nil {
+			return machine, nil
+		}
+	}
+
+	// If the machine object has no providerID (perhaps the actuator
+	// does not set this value, as is the case with OpenStack), then
+	// first look up the node using ProviderID. If that is successful
+	// then the machine can be found using the annotation (should it
+	// exist).
+	node, err := c.findNodeByProviderID(providerID)
+	if err != nil {
+		return nil, err
+	}
+	if node == nil {
+		return nil, nil
+	}
+	return c.findMachine(node.Annotations[machineAnnotationKey])
+}
+
+// findNodeByNodeName finds the Node object keyed by name. Returns
+// nil if it cannot be found. A DeepCopy() of the object is returned
+// on success.
+func (c *machineController) findNodeByNodeName(name string) (*corev1.Node, error) {
+	item, exists, err := c.nodeInformer.GetIndexer().GetByKey(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if !exists {
+		return nil, nil
+	}
+
+	node, ok := item.(*corev1.Node)
+	if !ok {
+		return nil, fmt.Errorf("internal error; unexpected type %T", item)
+	}
+
+	return node.DeepCopy(), nil
+}
+
+// machinesInMachineSet returns all the machines that belong to
+// machineSet. For each machine in the set a DeepCopy() of the object
+// is returned.
+func (c *machineController) machinesInMachineSet(machineSet *MachineSet) ([]*Machine, error) {
+	machines, err := c.listMachines(machineSet.Namespace, labels.SelectorFromSet(machineSet.Labels))
+	if err != nil {
+		return nil, err
+	}
+	if machines == nil {
+		return nil, nil
+	}
+
+	var result []*Machine
+
+	for _, machine := range machines {
+		if machineIsOwnedByMachineSet(machine, machineSet) {
+			result = append(result, machine)
+		}
+	}
+
+	return result, nil
+}
+
+// newMachineController constructs a controller that watches Nodes,
+// Machines, MachineSets and MachineDeployments as they are added,
+// updated and deleted on the cluster.
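+// Construction only registers informers and indexers; nothing is
+// started until run() is called.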
+func newMachineController( + dynamicclient dynamic.Interface, + kubeclient kubeclient.Interface, +) (*machineController, error) { + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeclient, 0) + informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicclient, 0, metav1.NamespaceAll, nil) + + machineDeploymentResource, _ := schema.ParseResourceArg("machinedeployments.v1alpha2.cluster.x-k8s.io") + + machineSetResource, _ := schema.ParseResourceArg("machinesets.v1alpha2.cluster.x-k8s.io") + if machineSetResource == nil { + panic("MachineSetResource") + } + + machineResource, _ := schema.ParseResourceArg("machines.v1alpha2.cluster.x-k8s.io") + if machineResource == nil { + panic("machineResource") + } + machineInformer := informerFactory.ForResource(*machineResource) + machineSetInformer := informerFactory.ForResource(*machineSetResource) + var machineDeploymentInformer informers.GenericInformer + + machineDeploymentInformer = informerFactory.ForResource(*machineDeploymentResource) + machineDeploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) + + machineInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) + machineSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) + + nodeInformer := kubeInformerFactory.Core().V1().Nodes().Informer() + nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{}) + + if err := machineInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{ + machineProviderIDIndex: indexMachineByProviderID, + }); err != nil { + return nil, fmt.Errorf("cannot add machine indexer: %v", err) + } + + if err := nodeInformer.GetIndexer().AddIndexers(cache.Indexers{ + nodeProviderIDIndex: indexNodeByProviderID, + }); err != nil { + return nil, fmt.Errorf("cannot add node indexer: %v", err) + } + + return &machineController{ + kubeInformerFactory: kubeInformerFactory, + machineInformerFactory: informerFactory, + machineDeploymentInformer: machineDeploymentInformer, + machineInformer: machineInformer, + machineSetInformer: machineSetInformer, + nodeInformer: nodeInformer, + dynamicclient: dynamicclient, + machineSetResource: machineSetResource, + machineResource: machineResource, + machineDeploymentResource: machineDeploymentResource, + }, nil +} + +func (c *machineController) machineSetProviderIDs(machineSet *MachineSet) ([]string, error) { + machines, err := c.machinesInMachineSet(machineSet) + if err != nil { + return nil, fmt.Errorf("error listing machines: %v", err) + } + + var nodes []string + + for _, machine := range machines { + if machine.Spec.ProviderID != nil && *machine.Spec.ProviderID != "" { + // Prefer machine<=>node mapping using ProviderID + node, err := c.findNodeByProviderID(*machine.Spec.ProviderID) + if err != nil { + return nil, err + } + if node != nil { + nodes = append(nodes, node.Spec.ProviderID) + continue + } + } + + if machine.Status.NodeRef == nil { + klog.V(4).Infof("Status.NodeRef of machine %q is currently nil", machine.Name) + continue + } + if machine.Status.NodeRef.Kind != "Node" { + klog.Errorf("Status.NodeRef of machine %q does not reference a node (rather %q)", machine.Name, machine.Status.NodeRef.Kind) + continue + } + + node, err := c.findNodeByNodeName(machine.Status.NodeRef.Name) + if err != nil { + return nil, fmt.Errorf("unknown node %q", machine.Status.NodeRef.Name) + } + + if node != nil { + nodes = append(nodes, node.Spec.ProviderID) + } + } + + klog.V(4).Infof("nodegroup %s has nodes %v", machineSet.Name, nodes) + + 
return nodes, nil
+}
+
+func (c *machineController) filterAllMachineSets(f machineSetFilterFunc) error {
+	return c.filterMachineSets(metav1.NamespaceAll, f)
+}
+
+func (c *machineController) filterMachineSets(namespace string, f machineSetFilterFunc) error {
+	machineSets, err := c.listMachineSets(namespace, labels.Everything())
+	if err != nil {
+		return err
+	}
+	for _, machineSet := range machineSets {
+		if err := f(machineSet); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *machineController) machineSetNodeGroups() ([]*nodegroup, error) {
+	var nodegroups []*nodegroup
+
+	if err := c.filterAllMachineSets(func(machineSet *MachineSet) error {
+		if machineSetHasMachineDeploymentOwnerRef(machineSet) {
+			return nil
+		}
+		ng, err := newNodegroupFromMachineSet(c, machineSet)
+		if err != nil {
+			return err
+		}
+		if ng.MaxSize()-ng.MinSize() > 0 && pointer.Int32PtrDerefOr(machineSet.Spec.Replicas, 0) > 0 {
+			nodegroups = append(nodegroups, ng)
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return nodegroups, nil
+}
+
+func (c *machineController) machineDeploymentNodeGroups() ([]*nodegroup, error) {
+	machineDeployments, err := c.listMachineDeployments(metav1.NamespaceAll, labels.Everything())
+	if err != nil {
+		return nil, err
+	}
+
+	var nodegroups []*nodegroup
+
+	for _, md := range machineDeployments {
+		ng, err := newNodegroupFromMachineDeployment(c, md)
+		if err != nil {
+			return nil, err
+		}
+		// add the nodegroup only if it has capacity to scale and a
+		// non-zero replica count
+		if ng.MaxSize()-ng.MinSize() > 0 && pointer.Int32PtrDerefOr(md.Spec.Replicas, 0) > 0 {
+			nodegroups = append(nodegroups, ng)
+		}
+	}
+
+	return nodegroups, nil
+}
+
+func (c *machineController) nodeGroups() ([]*nodegroup, error) {
+	machineSets, err := c.machineSetNodeGroups()
+	if err != nil {
+		return nil, err
+	}
+
+	machineDeployments, err := c.machineDeploymentNodeGroups()
+	if err != nil {
+		return nil, err
+	}
+	return append(machineSets, machineDeployments...), nil
+}
+
+func (c *machineController) nodeGroupForNode(node *corev1.Node) (*nodegroup, error) {
+	machine, err := c.findMachineByProviderID(node.Spec.ProviderID)
+	if err != nil {
+		return nil, err
+	}
+	if machine == nil {
+		return nil, nil
+	}
+
+	machineSet, err := c.findMachineOwner(machine)
+	if err != nil {
+		return nil, err
+	}
+
+	if machineSet == nil {
+		return nil, nil
+	}
+
+	if ref := machineSetMachineDeploymentRef(machineSet); ref != nil {
+		key := fmt.Sprintf("%s/%s", machineSet.Namespace, ref.Name)
+		machineDeployment, err := c.findMachineDeployment(key)
+		if err != nil {
+			return nil, fmt.Errorf("unknown MachineDeployment %q: %v", key, err)
+		}
+		if machineDeployment == nil {
+			return nil, fmt.Errorf("unknown MachineDeployment %q", key)
+		}
+		nodegroup, err := newNodegroupFromMachineDeployment(c, machineDeployment)
+		if err != nil {
+			return nil, fmt.Errorf("failed to build nodegroup for node %q: %v", node.Name, err)
+		}
+		// We don't scale from 0 so nodes must belong
+		// to a nodegroup that has a scale size of at
+		// least 1.
+		if nodegroup.MaxSize()-nodegroup.MinSize() < 1 {
+			return nil, nil
+		}
+		return nodegroup, nil
+	}
+
+	nodegroup, err := newNodegroupFromMachineSet(c, machineSet)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build nodegroup for node %q: %v", node.Name, err)
+	}
+
+	// We don't scale from 0 so nodes must belong to a nodegroup
+	// that has a scale size of at least 1.
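+	// This mirrors the MaxSize/MinSize filtering applied in
+	// machineSetNodeGroups and machineDeploymentNodeGroups above.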
+	if nodegroup.MaxSize()-nodegroup.MinSize() < 1 {
+		return nil, nil
+	}
+
+	klog.V(4).Infof("node %q is in nodegroup %q", node.Name, machineSet.Name)
+	return nodegroup, nil
+}
+
+// findNodeByProviderID finds the Node object keyed by providerID.
+// Returns nil if it cannot be found. A DeepCopy() of the object is
+// returned on success.
+func (c *machineController) findNodeByProviderID(providerID string) (*corev1.Node, error) {
+	objs, err := c.nodeInformer.GetIndexer().ByIndex(nodeProviderIDIndex, providerID)
+	if err != nil {
+		return nil, err
+	}
+
+	switch n := len(objs); {
+	case n == 0:
+		return nil, nil
+	case n > 1:
+		return nil, fmt.Errorf("internal error; expected len==1, got %v", n)
+	}
+
+	node, ok := objs[0].(*corev1.Node)
+	if !ok {
+		return nil, fmt.Errorf("internal error; unexpected type %T", objs[0])
+	}
+
+	return node.DeepCopy(), nil
+}
+
+func (c *machineController) getMachine(namespace, name string, options metav1.GetOptions) (*Machine, error) {
+	u, err := c.dynamicclient.Resource(*c.machineResource).Namespace(namespace).Get(context.TODO(), name, options)
+	if err != nil {
+		return nil, err
+	}
+	return newMachineFromUnstructured(u.DeepCopy()), nil
+}
+
+func (c *machineController) getMachineSet(namespace, name string, options metav1.GetOptions) (*MachineSet, error) {
+	u, err := c.dynamicclient.Resource(*c.machineSetResource).Namespace(namespace).Get(context.TODO(), name, options)
+	if err != nil {
+		return nil, err
+	}
+	return newMachineSetFromUnstructured(u.DeepCopy()), nil
+}
+
+func (c *machineController) getMachineDeployment(namespace, name string, options metav1.GetOptions) (*MachineDeployment, error) {
+	u, err := c.dynamicclient.Resource(*c.machineDeploymentResource).Namespace(namespace).Get(context.TODO(), name, options)
+	if err != nil {
+		return nil, err
+	}
+	return newMachineDeploymentFromUnstructured(u.DeepCopy()), nil
+}
+
+func (c *machineController) listMachines(namespace string, selector labels.Selector) ([]*Machine, error) {
+	objs, err := c.machineInformer.Lister().ByNamespace(namespace).List(selector)
+	if err != nil {
+		return nil, err
+	}
+
+	var machines []*Machine
+
+	for _, x := range objs {
+		u := x.(*unstructured.Unstructured).DeepCopy()
+		if machine := newMachineFromUnstructured(u); machine != nil {
+			machines = append(machines, machine)
+		}
+	}
+
+	return machines, nil
+}
+
+func (c *machineController) listMachineSets(namespace string, selector labels.Selector) ([]*MachineSet, error) {
+	objs, err := c.machineSetInformer.Lister().ByNamespace(namespace).List(selector)
+	if err != nil {
+		return nil, err
+	}
+
+	var machineSets []*MachineSet
+
+	for _, x := range objs {
+		u := x.(*unstructured.Unstructured).DeepCopy()
+		if machineSet := newMachineSetFromUnstructured(u); machineSet != nil {
+			machineSets = append(machineSets, machineSet)
+		}
+	}
+
+	return machineSets, nil
+}
+
+func (c *machineController) listMachineDeployments(namespace string, selector labels.Selector) ([]*MachineDeployment, error) {
+	objs, err := c.machineDeploymentInformer.Lister().ByNamespace(namespace).List(selector)
+	if err != nil {
+		return nil, err
+	}
+
+	var machineDeployments []*MachineDeployment
+
+	for _, x := range objs {
+		u := x.(*unstructured.Unstructured).DeepCopy()
+		if machineDeployment := newMachineDeploymentFromUnstructured(u); machineDeployment != nil {
+			machineDeployments = append(machineDeployments, machineDeployment)
+		}
+	}
+
+	return machineDeployments, nil
+}
diff --git
a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go new file mode 100644 index 000000000000..646b18174ba0 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go @@ -0,0 +1,993 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + "fmt" + "path" + "reflect" + "sort" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + fakedynamic "k8s.io/client-go/dynamic/fake" + fakekube "k8s.io/client-go/kubernetes/fake" + "k8s.io/utils/pointer" +) + +type testControllerShutdownFunc func() + +type testConfig struct { + spec *testSpec + machineDeployment *MachineDeployment + machineSet *MachineSet + machines []*Machine + nodes []*corev1.Node +} + +type testSpec struct { + annotations map[string]string + machineDeploymentName string + machineSetName string + namespace string + nodeCount int + rootIsMachineDeployment bool +} + +func mustCreateTestController(t *testing.T, testConfigs ...*testConfig) (*machineController, testControllerShutdownFunc) { + t.Helper() + + nodeObjects := make([]runtime.Object, 0) + machineObjects := make([]runtime.Object, 0) + + for _, config := range testConfigs { + for i := range config.nodes { + nodeObjects = append(nodeObjects, config.nodes[i]) + } + + for i := range config.machines { + machineObjects = append(machineObjects, newUnstructuredFromMachine(config.machines[i])) + } + + machineObjects = append(machineObjects, newUnstructuredFromMachineSet(config.machineSet)) + if config.machineDeployment != nil { + machineObjects = append(machineObjects, newUnstructuredFromMachineDeployment(config.machineDeployment)) + } + } + + kubeclientSet := fakekube.NewSimpleClientset(nodeObjects...) + dynamicClientset := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), machineObjects...) + controller, err := newMachineController(dynamicClientset, kubeclientSet) + if err != nil { + t.Fatal("failed to create test controller") + } + + stopCh := make(chan struct{}) + if err := controller.run(stopCh); err != nil { + t.Fatalf("failed to run controller: %v", err) + } + + return controller, func() { + close(stopCh) + } +} + +func createMachineSetTestConfig(namespace string, nodeCount int, annotations map[string]string) *testConfig { + return createTestConfigs(createTestSpecs(namespace, 1, nodeCount, false, annotations)...)[0] +} + +func createMachineSetTestConfigs(namespace string, configCount, nodeCount int, annotations map[string]string) []*testConfig { + return createTestConfigs(createTestSpecs(namespace, configCount, nodeCount, false, annotations)...) 
+} + +func createMachineDeploymentTestConfig(namespace string, nodeCount int, annotations map[string]string) *testConfig { + return createTestConfigs(createTestSpecs(namespace, 1, nodeCount, true, annotations)...)[0] +} + +func createMachineDeploymentTestConfigs(namespace string, configCount, nodeCount int, annotations map[string]string) []*testConfig { + return createTestConfigs(createTestSpecs(namespace, configCount, nodeCount, true, annotations)...) +} + +func createTestSpecs(namespace string, scalableResourceCount, nodeCount int, isMachineDeployment bool, annotations map[string]string) []testSpec { + var specs []testSpec + + for i := 0; i < scalableResourceCount; i++ { + specs = append(specs, testSpec{ + annotations: annotations, + machineDeploymentName: fmt.Sprintf("machinedeployment-%d", i), + machineSetName: fmt.Sprintf("machineset-%d", i), + namespace: strings.ToLower(namespace), + nodeCount: nodeCount, + rootIsMachineDeployment: isMachineDeployment, + }) + } + + return specs +} + +func createTestConfigs(specs ...testSpec) []*testConfig { + var result []*testConfig + + for i, spec := range specs { + config := &testConfig{ + spec: &specs[i], + nodes: make([]*corev1.Node, spec.nodeCount), + machines: make([]*Machine, spec.nodeCount), + } + + config.machineSet = &MachineSet{ + TypeMeta: v1.TypeMeta{ + APIVersion: "cluster.x-k8s.io/v1alpha2", + Kind: "MachineSet", + }, + ObjectMeta: v1.ObjectMeta{ + Name: spec.machineSetName, + Namespace: spec.namespace, + UID: types.UID(spec.machineSetName), + }, + } + + if !spec.rootIsMachineDeployment { + config.machineSet.ObjectMeta.Annotations = spec.annotations + config.machineSet.Spec.Replicas = int32ptr(int32(spec.nodeCount)) + } else { + config.machineDeployment = &MachineDeployment{ + TypeMeta: v1.TypeMeta{ + APIVersion: "cluster.x-k8s.io/v1alpha2", + Kind: "MachineDeployment", + }, + ObjectMeta: v1.ObjectMeta{ + Name: spec.machineDeploymentName, + Namespace: spec.namespace, + UID: types.UID(spec.machineDeploymentName), + Annotations: spec.annotations, + }, + Spec: MachineDeploymentSpec{ + Replicas: int32ptr(int32(spec.nodeCount)), + }, + } + + config.machineSet.OwnerReferences = make([]v1.OwnerReference, 1) + config.machineSet.OwnerReferences[0] = v1.OwnerReference{ + Name: config.machineDeployment.Name, + Kind: config.machineDeployment.Kind, + UID: config.machineDeployment.UID, + } + } + + machineOwner := v1.OwnerReference{ + Name: config.machineSet.Name, + Kind: config.machineSet.Kind, + UID: config.machineSet.UID, + } + + for j := 0; j < spec.nodeCount; j++ { + config.nodes[j], config.machines[j] = makeLinkedNodeAndMachine(j, spec.namespace, machineOwner) + } + + result = append(result, config) + } + + return result +} + +// makeLinkedNodeAndMachine creates a node and machine. The machine +// has its NodeRef set to the new node and the new machine's owner +// reference is set to owner. 
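+// The node and the machine share the same provider ID string so that
+// the provider-ID indexers can join the two objects.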
+func makeLinkedNodeAndMachine(i int, namespace string, owner v1.OwnerReference) (*corev1.Node, *Machine) { + node := &corev1.Node{ + TypeMeta: v1.TypeMeta{ + Kind: "Node", + }, + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-node-%d", namespace, owner.Name, i), + Annotations: map[string]string{ + machineAnnotationKey: fmt.Sprintf("%s/%s-%s-machine-%d", namespace, namespace, owner.Name, i), + }, + }, + Spec: corev1.NodeSpec{ + ProviderID: fmt.Sprintf("%s-%s-nodeid-%d", namespace, owner.Name, i), + }, + } + + machine := &Machine{ + TypeMeta: v1.TypeMeta{ + APIVersion: "cluster.x-k8s.io/v1alpha2", + Kind: "Machine", + }, + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-machine-%d", namespace, owner.Name, i), + Namespace: namespace, + OwnerReferences: []v1.OwnerReference{{ + Name: owner.Name, + Kind: owner.Kind, + UID: owner.UID, + }}, + }, + Spec: MachineSpec{ + ProviderID: pointer.StringPtr(fmt.Sprintf("%s-%s-nodeid-%d", namespace, owner.Name, i)), + }, + Status: MachineStatus{ + NodeRef: &corev1.ObjectReference{ + Kind: node.Kind, + Name: node.Name, + }, + }, + } + + return node, machine +} + +func int32ptr(v int32) *int32 { + return &v +} + +func addTestConfigs(t *testing.T, controller *machineController, testConfigs ...*testConfig) error { + t.Helper() + + for _, config := range testConfigs { + if config.machineDeployment != nil { + + if err := controller.machineDeploymentInformer.Informer().GetStore().Add(newUnstructuredFromMachineDeployment(config.machineDeployment)); err != nil { + return err + } + } + if err := controller.machineSetInformer.Informer().GetStore().Add(newUnstructuredFromMachineSet(config.machineSet)); err != nil { + return err + } + for i := range config.machines { + if err := controller.machineInformer.Informer().GetStore().Add(newUnstructuredFromMachine(config.machines[i])); err != nil { + return err + } + } + for i := range config.nodes { + if err := controller.nodeInformer.GetStore().Add(config.nodes[i]); err != nil { + return err + } + } + } + return nil +} + +func deleteTestConfigs(t *testing.T, controller *machineController, testConfigs ...*testConfig) error { + t.Helper() + + for _, config := range testConfigs { + for i := range config.nodes { + if err := controller.nodeInformer.GetStore().Delete(config.nodes[i]); err != nil { + return err + } + } + for i := range config.machines { + if err := controller.machineInformer.Informer().GetStore().Delete(config.machines[i]); err != nil { + return err + } + } + if err := controller.machineSetInformer.Informer().GetStore().Delete(config.machineSet); err != nil { + return err + } + if config.machineDeployment != nil { + if err := controller.machineDeploymentInformer.Informer().GetStore().Delete(config.machineDeployment); err != nil { + return err + } + } + } + return nil +} + +func TestControllerFindMachineByID(t *testing.T) { + type testCase struct { + description string + name string + namespace string + lookupSucceeds bool + } + + var testCases = []testCase{{ + description: "lookup fails", + lookupSucceeds: false, + name: "machine-does-not-exist", + namespace: "namespace-does-not-exist", + }, { + description: "lookup fails in valid namespace", + lookupSucceeds: false, + name: "machine-does-not-exist-in-existing-namespace", + }, { + description: "lookup succeeds", + lookupSucceeds: true, + }} + + test := func(t *testing.T, tc testCase, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + machine, err := controller.findMachine(path.Join(tc.namespace, 
tc.name)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if tc.lookupSucceeds && machine == nil { + t.Error("expected success, findMachine failed") + } + + if tc.lookupSucceeds && machine != nil { + if machine.Name != tc.name { + t.Errorf("expected %q, got %q", tc.name, machine.Name) + } + if machine.Namespace != tc.namespace { + t.Errorf("expected %q, got %q", tc.namespace, machine.Namespace) + } + } + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + if tc.name == "" { + tc.name = testConfig.machines[0].Name + } + if tc.namespace == "" { + tc.namespace = testConfig.machines[0].Namespace + } + test(t, tc, testConfig) + }) + } +} + +func TestControllerFindMachineOwner(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + // Test #1: Lookup succeeds + testResult1, err := controller.findMachineOwner(testConfig.machines[0].DeepCopy()) + if err != nil { + t.Fatalf("unexpected error, got %v", err) + } + if testResult1 == nil { + t.Fatal("expected non-nil result") + } + if testConfig.spec.machineSetName != testResult1.Name { + t.Errorf("expected %q, got %q", testConfig.spec.machineSetName, testResult1.Name) + } + + // Test #2: Lookup fails as the machine UUID != machineset UUID + testMachine2 := testConfig.machines[0].DeepCopy() + testMachine2.OwnerReferences[0].UID = "does-not-match-machineset" + testResult2, err := controller.findMachineOwner(testMachine2) + if err != nil { + t.Fatalf("unexpected error, got %v", err) + } + if testResult2 != nil { + t.Fatal("expected nil result") + } + + // Test #3: Delete the MachineSet and lookup should fail + if err := controller.machineSetInformer.Informer().GetStore().Delete(testResult1); err != nil { + t.Fatalf("unexpected error, got %v", err) + } + testResult3, err := controller.findMachineOwner(testConfig.machines[0].DeepCopy()) + if err != nil { + t.Fatalf("unexpected error, got %v", err) + } + if testResult3 != nil { + t.Fatal("expected lookup to fail") + } +} + +func TestControllerFindMachineByProviderID(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + // Remove all the "machine" annotation values on all the + // nodes. We want to force findMachineByProviderID() to only + // be successful by searching on provider ID. 
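+	// (The machines keep their Spec.ProviderID; only the node-side
+	// annotation is removed.)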
+ for _, node := range testConfig.nodes { + delete(node.Annotations, machineAnnotationKey) + if err := controller.nodeInformer.GetStore().Update(node); err != nil { + t.Fatalf("unexpected error updating node, got %v", err) + } + } + + // Test #1: Verify underlying machine provider ID matches + machine, err := controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if machine == nil { + t.Fatal("expected to find machine") + } + + if !reflect.DeepEqual(machine, testConfig.machines[0]) { + t.Fatalf("expected machines to be equal - expected %+v, got %+v", testConfig.machines[0], machine) + } + + // Test #2: Verify machine is not found if it has a + // non-existent or different provider ID. + machine = testConfig.machines[0].DeepCopy() + machine.Spec.ProviderID = pointer.StringPtr("does-not-match") + if err := controller.machineInformer.Informer().GetStore().Update(machine); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + machine, err = controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if machine != nil { + t.Fatal("expected find to fail") + } +} + +func TestControllerFindNodeByNodeName(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + // Test #1: Verify known node can be found + node, err := controller.findNodeByNodeName(testConfig.nodes[0].Name) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if node == nil { + t.Fatal("expected lookup to be successful") + } + + // Test #2: Verify non-existent node cannot be found + node, err = controller.findNodeByNodeName(testConfig.nodes[0].Name + "non-existent") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if node != nil { + t.Fatal("expected lookup to fail") + } +} + +func TestControllerMachinesInMachineSet(t *testing.T) { + testConfig1 := createMachineSetTestConfig("testConfig1", 5, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig1) + defer stop() + + // Construct a second set of objects and add the machines, + // nodes and the additional machineset to the existing set of + // test objects in the controller. This gives us two + // machinesets, each with their own machines and linked nodes. + testConfig2 := createMachineSetTestConfig("testConfig2", 5, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + if err := addTestConfigs(t, controller, testConfig2); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + machinesInTestObjs1, err := controller.listMachines(testConfig1.spec.namespace, labels.Everything()) + if err != nil { + t.Fatalf("error listing machines: %v", err) + } + + machinesInTestObjs2, err := controller.listMachines(testConfig2.spec.namespace, labels.Everything()) + if err != nil { + t.Fatalf("error listing machines: %v", err) + } + + actual := len(machinesInTestObjs1) + len(machinesInTestObjs2) + expected := len(testConfig1.machines) + len(testConfig2.machines) + if actual != expected { + t.Fatalf("expected %d machines, got %d", expected, actual) + } + + // Sort results as order is not guaranteed. 
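+	// (The informer cache is map-backed, so List returns objects in
+	// arbitrary order.)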
+ sort.Slice(machinesInTestObjs1, func(i, j int) bool { + return machinesInTestObjs1[i].Name < machinesInTestObjs1[j].Name + }) + sort.Slice(machinesInTestObjs2, func(i, j int) bool { + return machinesInTestObjs2[i].Name < machinesInTestObjs2[j].Name + }) + + for i, m := range machinesInTestObjs1 { + if m.Name != testConfig1.machines[i].Name { + t.Errorf("expected %q, got %q", testConfig1.machines[i].Name, m.Name) + } + if m.Namespace != testConfig1.machines[i].Namespace { + t.Errorf("expected %q, got %q", testConfig1.machines[i].Namespace, m.Namespace) + } + } + + for i, m := range machinesInTestObjs2 { + if m.Name != testConfig2.machines[i].Name { + t.Errorf("expected %q, got %q", testConfig2.machines[i].Name, m.Name) + } + if m.Namespace != testConfig2.machines[i].Namespace { + t.Errorf("expected %q, got %q", testConfig2.machines[i].Namespace, m.Namespace) + } + } + + // Finally everything in the respective objects should be equal. + if !reflect.DeepEqual(testConfig1.machines, machinesInTestObjs1) { + t.Fatalf("expected %+v, got %+v", testConfig1.machines, machinesInTestObjs1) + } + if !reflect.DeepEqual(testConfig2.machines, machinesInTestObjs2) { + t.Fatalf("expected %+v, got %+v", testConfig2.machines, machinesInTestObjs2) + } +} + +func TestControllerLookupNodeGroupForNonExistentNode(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + node := testConfig.nodes[0].DeepCopy() + node.Spec.ProviderID = "does-not-exist" + + ng, err := controller.nodeGroupForNode(node) + + // Looking up a node that doesn't exist doesn't generate an + // error. But, equally, the ng should actually be nil. 
+ if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if ng != nil { + t.Fatalf("unexpected nodegroup: %v", ng) + } +} + +func TestControllerNodeGroupForNodeWithMissingMachineOwner(t *testing.T) { + test := func(t *testing.T, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + machine := testConfig.machines[0].DeepCopy() + machine.OwnerReferences = []v1.OwnerReference{} + if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + + ng, err := controller.nodeGroupForNode(testConfig.nodes[0]) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if ng != nil { + t.Fatalf("unexpected nodegroup: %v", ng) + } + } + + t.Run("MachineSet", func(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + test(t, testConfig) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + testConfig := createMachineDeploymentTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + test(t, testConfig) + }) +} + +func TestControllerNodeGroupForNodeWithPositiveScalingBounds(t *testing.T) { + test := func(t *testing.T, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + ng, err := controller.nodeGroupForNode(testConfig.nodes[0]) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // We don't scale from 0 so nodes must belong to a + // nodegroup that has a scale size of at least 1. + if ng != nil { + t.Fatalf("unexpected nodegroup: %v", ng) + } + } + + t.Run("MachineSet", func(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "1", + }) + test(t, testConfig) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + testConfig := createMachineDeploymentTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "1", + }) + test(t, testConfig) + }) +} + +func TestControllerNodeGroups(t *testing.T) { + assertNodegroupLen := func(t *testing.T, controller *machineController, expected int) { + t.Helper() + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got := len(nodegroups); got != expected { + t.Fatalf("expected %d, got %d", expected, got) + } + } + + annotations := map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "2", + } + + controller, stop := mustCreateTestController(t) + defer stop() + + // Test #1: zero nodegroups + assertNodegroupLen(t, controller, 0) + + // Test #2: add 5 machineset-based nodegroups + machineSetConfigs := createMachineSetTestConfigs("MachineSet", 5, 1, annotations) + if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + assertNodegroupLen(t, controller, 5) + + // Test #2: add 2 machinedeployment-based nodegroups + machineDeploymentConfigs := createMachineDeploymentTestConfigs("MachineDeployment", 2, 1, annotations) + if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + assertNodegroupLen(t, controller, 7) + + // Test 
#3: delete 5 machineset-backed objects + if err := deleteTestConfigs(t, controller, machineSetConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + assertNodegroupLen(t, controller, 2) + + // Test #4: delete 2 machinedeployment-backed objects + if err := deleteTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + assertNodegroupLen(t, controller, 0) + + annotations = map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "1", + } + + // Test #5: machineset with no scaling bounds results in no nodegroups + machineSetConfigs = createMachineSetTestConfigs("MachineSet", 5, 1, annotations) + if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + assertNodegroupLen(t, controller, 0) + + // Test #6: machinedeployment with no scaling bounds results in no nodegroups + machineDeploymentConfigs = createMachineDeploymentTestConfigs("MachineDeployment", 2, 1, annotations) + if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + assertNodegroupLen(t, controller, 0) + + annotations = map[string]string{ + nodeGroupMinSizeAnnotationKey: "-1", + nodeGroupMaxSizeAnnotationKey: "1", + } + + // Test #7: machineset with bad scaling bounds results in an error and no nodegroups + machineSetConfigs = createMachineSetTestConfigs("MachineSet", 5, 1, annotations) + if err := addTestConfigs(t, controller, machineSetConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := controller.nodeGroups(); err == nil { + t.Fatalf("expected an error") + } + + // Test #8: machinedeployment with bad scaling bounds results in an error and no nodegroups + machineDeploymentConfigs = createMachineDeploymentTestConfigs("MachineDeployment", 2, 1, annotations) + if err := addTestConfigs(t, controller, machineDeploymentConfigs...); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := controller.nodeGroups(); err == nil { + t.Fatalf("expected an error") + } +} + +func TestControllerNodeGroupsNodeCount(t *testing.T) { + type testCase struct { + nodeGroups int + nodesPerGroup int + expectedNodeGroups int + expectedNodesPerGroup int + } + + var testCases = []testCase{{ + nodeGroups: 0, + nodesPerGroup: 0, + expectedNodeGroups: 0, + expectedNodesPerGroup: 0, + }, { + nodeGroups: 1, + nodesPerGroup: 0, + expectedNodeGroups: 0, + expectedNodesPerGroup: 0, + }, { + nodeGroups: 2, + nodesPerGroup: 10, + expectedNodeGroups: 2, + expectedNodesPerGroup: 10, + }} + + test := func(t *testing.T, tc testCase, testConfigs []*testConfig) { + controller, stop := mustCreateTestController(t, testConfigs...) 
+ defer stop() + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got := len(nodegroups); got != tc.expectedNodeGroups { + t.Fatalf("expected %d, got %d", tc.expectedNodeGroups, got) + } + + for i := range nodegroups { + nodes, err := nodegroups[i].Nodes() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got := len(nodes); got != tc.expectedNodesPerGroup { + t.Fatalf("expected %d, got %d", tc.expectedNodesPerGroup, got) + } + } + } + + annotations := map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + } + + t.Run("MachineSet", func(t *testing.T) { + for _, tc := range testCases { + test(t, tc, createMachineSetTestConfigs(testNamespace, tc.nodeGroups, tc.nodesPerGroup, annotations)) + } + }) + + t.Run("MachineDeployment", func(t *testing.T) { + for _, tc := range testCases { + test(t, tc, createMachineDeploymentTestConfigs(testNamespace, tc.nodeGroups, tc.nodesPerGroup, annotations)) + } + }) +} + +func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 1, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + // Remove all the provider ID values on all the machines. We + // want to force findMachineByProviderID() to fallback to + // searching using the annotation on the node object. + for _, machine := range testConfig.machines { + machine.Spec.ProviderID = nil + if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + } + + // Test #1: Verify machine can be found from node annotation + machine, err := controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if machine == nil { + t.Fatal("expected to find machine") + } + if !reflect.DeepEqual(machine, testConfig.machines[0]) { + t.Fatalf("expected machines to be equal - expected %+v, got %+v", testConfig.machines[0], machine) + } + + // Test #2: Verify machine is not found if it has no + // corresponding machine annotation. + node := testConfig.nodes[0].DeepCopy() + delete(node.Annotations, machineAnnotationKey) + if err := controller.nodeInformer.GetStore().Update(node); err != nil { + t.Fatalf("unexpected error updating node, got %v", err) + } + machine, err = controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if machine != nil { + t.Fatal("expected find to fail") + } +} + +func TestControllerMachineSetNodeNamesWithoutLinkage(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 3, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + // Remove all linkage between node and machine. 
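+	// With neither a provider ID nor a NodeRef there is no way to map
+	// a machine to a node, so Nodes() below must come back empty.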
+ for _, machine := range testConfig.machines { + machine.Spec.ProviderID = nil + if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + } + for _, machine := range testConfig.machines { + machine.Status.NodeRef = nil + if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + } + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if l := len(nodegroups); l != 1 { + t.Fatalf("expected 1 nodegroup, got %d", l) + } + + ng := nodegroups[0] + nodeNames, err := ng.Nodes() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // We removed all linkage - so we should get 0 nodes back. + if len(nodeNames) != 0 { + t.Fatalf("expected len=0, got len=%v", len(nodeNames)) + } +} + +func TestControllerMachineSetNodeNamesUsingProviderID(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 3, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + // Remove Status.NodeRef.Name on all the machines. We want to + // force machineSetNodeNames() to only consider the provider + // ID for lookups. + for _, machine := range testConfig.machines { + machine.Status.NodeRef = nil + if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + } + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if l := len(nodegroups); l != 1 { + t.Fatalf("expected 1 nodegroup, got %d", l) + } + + ng := nodegroups[0] + nodeNames, err := ng.Nodes() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if len(nodeNames) != len(testConfig.nodes) { + t.Fatalf("expected len=%v, got len=%v", len(testConfig.nodes), len(nodeNames)) + } + + sort.Slice(nodeNames, func(i, j int) bool { + return nodeNames[i].Id < nodeNames[j].Id + }) + + for i := range testConfig.nodes { + if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID { + t.Fatalf("expected %q, got %q", testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id) + } + } +} + +func TestControllerMachineSetNodeNamesUsingStatusNodeRefName(t *testing.T) { + testConfig := createMachineSetTestConfig(testNamespace, 3, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + }) + + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + // Remove all the provider ID values on all the machines. We + // want to force machineSetNodeNames() to fallback to + // searching using Status.NodeRef.Name. 
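+	// (Status.NodeRef is left intact on every machine, so only the
+	// NodeRef fallback path is exercised.)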
+ for _, machine := range testConfig.machines { + machine.Spec.ProviderID = nil + if err := controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine)); err != nil { + t.Fatalf("unexpected error updating machine, got %v", err) + } + } + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if l := len(nodegroups); l != 1 { + t.Fatalf("expected 1 nodegroup, got %d", l) + } + + nodeNames, err := nodegroups[0].Nodes() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if len(nodeNames) != len(testConfig.nodes) { + t.Fatalf("expected len=%v, got len=%v", len(testConfig.nodes), len(nodeNames)) + } + + sort.Slice(nodeNames, func(i, j int) bool { + return nodeNames[i].Id < nodeNames[j].Id + }) + + for i := range testConfig.nodes { + if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID { + t.Fatalf("expected %q, got %q", testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id) + } + } +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go new file mode 100644 index 000000000000..9d883625594b --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_converters.go @@ -0,0 +1,188 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusterapi + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/pointer" +) + +func newMachineDeploymentFromUnstructured(u *unstructured.Unstructured) *MachineDeployment { + machineDeployment := MachineDeployment{ + TypeMeta: metav1.TypeMeta{ + Kind: u.GetKind(), + APIVersion: u.GetAPIVersion(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: u.GetName(), + Namespace: u.GetNamespace(), + UID: u.GetUID(), + Labels: u.GetLabels(), + Annotations: u.GetAnnotations(), + OwnerReferences: u.GetOwnerReferences(), + DeletionTimestamp: u.GetDeletionTimestamp(), + }, + Spec: MachineDeploymentSpec{}, + Status: MachineDeploymentStatus{}, + } + + replicas, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") + if err == nil && found { + machineDeployment.Spec.Replicas = pointer.Int32Ptr(int32(replicas)) + } + + return &machineDeployment +} + +func newMachineSetFromUnstructured(u *unstructured.Unstructured) *MachineSet { + machineSet := MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: u.GetKind(), + APIVersion: u.GetAPIVersion(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: u.GetName(), + Namespace: u.GetNamespace(), + UID: u.GetUID(), + Labels: u.GetLabels(), + Annotations: u.GetAnnotations(), + OwnerReferences: u.GetOwnerReferences(), + DeletionTimestamp: u.GetDeletionTimestamp(), + }, + Spec: MachineSetSpec{}, + Status: MachineSetStatus{}, + } + + replicas, found, err := unstructured.NestedInt64(u.Object, "spec", "replicas") + if err == nil && found { + machineSet.Spec.Replicas = pointer.Int32Ptr(int32(replicas)) + } + + return &machineSet +} + +func newMachineFromUnstructured(u *unstructured.Unstructured) *Machine { + machine := Machine{ + TypeMeta: metav1.TypeMeta{ + Kind: u.GetKind(), + APIVersion: u.GetAPIVersion(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: u.GetName(), + Namespace: u.GetNamespace(), + UID: u.GetUID(), + Labels: u.GetLabels(), + Annotations: u.GetAnnotations(), + OwnerReferences: u.GetOwnerReferences(), + ClusterName: u.GetClusterName(), + DeletionTimestamp: u.GetDeletionTimestamp(), + }, + Spec: MachineSpec{}, + Status: MachineStatus{}, + } + + if providerID, _, _ := unstructured.NestedString(u.Object, "spec", "providerID"); providerID != "" { + machine.Spec.ProviderID = pointer.StringPtr(providerID) + } + + nodeRef := corev1.ObjectReference{} + + if nodeRefKind, _, _ := unstructured.NestedString(u.Object, "status", "nodeRef", "kind"); nodeRefKind != "" { + nodeRef.Kind = nodeRefKind + } + + if nodeRefName, _, _ := unstructured.NestedString(u.Object, "status", "nodeRef", "name"); nodeRefName != "" { + nodeRef.Name = nodeRefName + } + + if nodeRef.Name != "" || nodeRef.Kind != "" { + machine.Status.NodeRef = &nodeRef + } + + return &machine +} + +func newUnstructuredFromMachineSet(m *MachineSet) *unstructured.Unstructured { + u := unstructured.Unstructured{} + + u.SetAPIVersion(m.APIVersion) + u.SetAnnotations(m.Annotations) + u.SetKind(m.Kind) + u.SetLabels(m.Labels) + u.SetName(m.Name) + u.SetNamespace(m.Namespace) + u.SetOwnerReferences(m.OwnerReferences) + u.SetUID(m.UID) + u.SetDeletionTimestamp(m.DeletionTimestamp) + + if m.Spec.Replicas != nil { + unstructured.SetNestedField(u.Object, int64(*m.Spec.Replicas), "spec", "replicas") + } + + return &u +} + +func newUnstructuredFromMachineDeployment(m *MachineDeployment) *unstructured.Unstructured { + u := unstructured.Unstructured{} + + u.SetAPIVersion(m.APIVersion) + 
u.SetAnnotations(m.Annotations) + u.SetKind(m.Kind) + u.SetLabels(m.Labels) + u.SetName(m.Name) + u.SetNamespace(m.Namespace) + u.SetOwnerReferences(m.OwnerReferences) + u.SetUID(m.UID) + u.SetDeletionTimestamp(m.DeletionTimestamp) + + if m.Spec.Replicas != nil { + unstructured.SetNestedField(u.Object, int64(*m.Spec.Replicas), "spec", "replicas") + } + + return &u +} + +func newUnstructuredFromMachine(m *Machine) *unstructured.Unstructured { + u := unstructured.Unstructured{} + + u.SetAPIVersion(m.APIVersion) + u.SetAnnotations(m.Annotations) + u.SetKind(m.Kind) + u.SetLabels(m.Labels) + u.SetName(m.Name) + u.SetNamespace(m.Namespace) + u.SetOwnerReferences(m.OwnerReferences) + u.SetUID(m.UID) + u.SetDeletionTimestamp(m.DeletionTimestamp) + + if m.Spec.ProviderID != nil && *m.Spec.ProviderID != "" { + unstructured.SetNestedField(u.Object, *m.Spec.ProviderID, "spec", "providerID") + } + + if m.Status.NodeRef != nil { + if m.Status.NodeRef.Kind != "" { + unstructured.SetNestedField(u.Object, m.Status.NodeRef.Kind, "status", "nodeRef", "kind") + } + if m.Status.NodeRef.Name != "" { + unstructured.SetNestedField(u.Object, m.Status.NodeRef.Name, "status", "nodeRef", "name") + } + } + + return &u +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go new file mode 100644 index 000000000000..e10845307217 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go @@ -0,0 +1,137 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + "context" + "fmt" + "path" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/pointer" +) + +type machineDeploymentScalableResource struct { + controller *machineController + machineDeployment *MachineDeployment + maxSize int + minSize int +} + +var _ scalableResource = (*machineDeploymentScalableResource)(nil) + +func (r machineDeploymentScalableResource) ID() string { + return path.Join(r.Namespace(), r.Name()) +} + +func (r machineDeploymentScalableResource) MaxSize() int { + return r.maxSize +} + +func (r machineDeploymentScalableResource) MinSize() int { + return r.minSize +} + +func (r machineDeploymentScalableResource) Name() string { + return r.machineDeployment.Name +} + +func (r machineDeploymentScalableResource) Namespace() string { + return r.machineDeployment.Namespace +} + +func (r machineDeploymentScalableResource) Nodes() ([]string, error) { + var result []string + + if err := r.controller.filterAllMachineSets(func(machineSet *MachineSet) error { + if machineSetIsOwnedByMachineDeployment(machineSet, r.machineDeployment) { + names, err := r.controller.machineSetNodeNames(machineSet) + if err != nil { + return err + } + result = append(result, names...) 
+ } + return nil + }); err != nil { + return nil, err + } + + return result, nil +} + +func (r machineDeploymentScalableResource) Replicas() int32 { + return pointer.Int32PtrDerefOr(r.machineDeployment.Spec.Replicas, 0) +} + +func (r machineDeploymentScalableResource) SetSize(nreplicas int32) error { + u, err := r.controller.dynamicclient.Resource(*r.controller.machineDeploymentResource).Namespace(r.machineDeployment.Namespace).Get(context.TODO(), r.machineDeployment.Name, metav1.GetOptions{}) + + if err != nil { + return err + } + + if u == nil { + return fmt.Errorf("unknown machineDeployment %s", r.machineDeployment.Name) + } + + u = u.DeepCopy() + if err := unstructured.SetNestedField(u.Object, int64(nreplicas), "spec", "replicas"); err != nil { + return fmt.Errorf("failed to set replica value: %v", err) + } + + _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineDeploymentResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) + return updateErr +} + +func (r machineDeploymentScalableResource) MarkMachineForDeletion(machine *Machine) error { + u, err := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(machine.Namespace).Get(context.TODO(), machine.Name, metav1.GetOptions{}) + + if err != nil { + return err + } + if u == nil { + return fmt.Errorf("unknown machine %s", machine.Name) + } + + u = u.DeepCopy() + + annotations := u.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[machineDeleteAnnotationKey] = time.Now().String() + u.SetAnnotations(annotations) + + _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) + return updateErr +} + +func newMachineDeploymentScalableResource(controller *machineController, machineDeployment *MachineDeployment) (*machineDeploymentScalableResource, error) { + minSize, maxSize, err := parseScalingBounds(machineDeployment.Annotations) + if err != nil { + return nil, fmt.Errorf("error validating min/max annotations: %v", err) + } + + return &machineDeploymentScalableResource{ + controller: controller, + machineDeployment: machineDeployment, + maxSize: maxSize, + minSize: minSize, + }, nil +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go new file mode 100644 index 000000000000..0cf2c1a7051d --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go @@ -0,0 +1,122 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusterapi + +import ( + "context" + "fmt" + "path" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/pointer" +) + +type machineSetScalableResource struct { + controller *machineController + machineSet *MachineSet + maxSize int + minSize int +} + +var _ scalableResource = (*machineSetScalableResource)(nil) + +func (r machineSetScalableResource) ID() string { + return path.Join(r.Namespace(), r.Name()) +} + +func (r machineSetScalableResource) MaxSize() int { + return r.maxSize +} + +func (r machineSetScalableResource) MinSize() int { + return r.minSize +} + +func (r machineSetScalableResource) Name() string { + return r.machineSet.Name +} + +func (r machineSetScalableResource) Namespace() string { + return r.machineSet.Namespace +} + +func (r machineSetScalableResource) Nodes() ([]string, error) { + return r.controller.machineSetNodeNames(r.machineSet) +} + +func (r machineSetScalableResource) Replicas() int32 { + return pointer.Int32PtrDerefOr(r.machineSet.Spec.Replicas, 0) +} + +func (r machineSetScalableResource) SetSize(nreplicas int32) error { + u, err := r.controller.dynamicclient.Resource(*r.controller.machineSetResource).Namespace(r.machineSet.Namespace).Get(context.TODO(), r.machineSet.Name, metav1.GetOptions{}) + + if err != nil { + return err + } + + if u == nil { + return fmt.Errorf("unknown machineSet %s", r.machineSet.Name) + } + + u = u.DeepCopy() + if err := unstructured.SetNestedField(u.Object, int64(nreplicas), "spec", "replicas"); err != nil { + return fmt.Errorf("failed to set replica value: %v", err) + } + + _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineSetResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) + return updateErr +} + +func (r machineSetScalableResource) MarkMachineForDeletion(machine *Machine) error { + u, err := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(machine.Namespace).Get(context.TODO(), machine.Name, metav1.GetOptions{}) + + if err != nil { + return err + } + if u == nil { + return fmt.Errorf("unknown machine %s", machine.Name) + } + + u = u.DeepCopy() + + annotations := u.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[machineDeleteAnnotationKey] = time.Now().String() + u.SetAnnotations(annotations) + + _, updateErr := r.controller.dynamicclient.Resource(*r.controller.machineResource).Namespace(u.GetNamespace()).Update(context.TODO(), u, metav1.UpdateOptions{}) + return updateErr +} + +func newMachineSetScalableResource(controller *machineController, machineSet *MachineSet) (*machineSetScalableResource, error) { + minSize, maxSize, err := parseScalingBounds(machineSet.Annotations) + if err != nil { + return nil, fmt.Errorf("error validating min/max annotations: %v", err) + } + + return &machineSetScalableResource{ + controller: controller, + machineSet: machineSet, + maxSize: maxSize, + minSize: minSize, + }, nil +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go new file mode 100644 index 000000000000..f3df8359b3f9 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go @@ -0,0 +1,257 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clusterapi
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+)
+
+const (
+	machineDeleteAnnotationKey = "cluster.k8s.io/delete-machine"
+	machineAnnotationKey       = "cluster.k8s.io/machine"
+	debugFormat                = "%s (min: %d, max: %d, replicas: %d)"
+)
+
+type nodegroup struct {
+	machineController *machineController
+	scalableResource  scalableResource
+}
+
+var _ cloudprovider.NodeGroup = (*nodegroup)(nil)
+
+func (ng *nodegroup) Name() string {
+	return ng.scalableResource.Name()
+}
+
+func (ng *nodegroup) Namespace() string {
+	return ng.scalableResource.Namespace()
+}
+
+func (ng *nodegroup) MinSize() int {
+	return ng.scalableResource.MinSize()
+}
+
+func (ng *nodegroup) MaxSize() int {
+	return ng.scalableResource.MaxSize()
+}
+
+// TargetSize returns the current target size of the node group. It is
+// possible that the number of nodes in Kubernetes is different at the
+// moment but should be equal to Size() once everything stabilizes
+// (new nodes finish startup and registration or removed nodes are
+// deleted completely). Implementation required.
+func (ng *nodegroup) TargetSize() (int, error) {
+	return int(ng.scalableResource.Replicas()), nil
+}
+
+// IncreaseSize increases the size of the node group. To delete a node
+// you need to explicitly name it and use DeleteNode. This function
+// should wait until node group size is updated. Implementation
+// required.
+func (ng *nodegroup) IncreaseSize(delta int) error {
+	if delta <= 0 {
+		return fmt.Errorf("size increase must be positive")
+	}
+	size := int(ng.scalableResource.Replicas())
+	if size+delta > ng.MaxSize() {
+		return fmt.Errorf("size increase too large - desired:%d max:%d", size+delta, ng.MaxSize())
+	}
+	return ng.scalableResource.SetSize(int32(size + delta))
+}
+
+// DeleteNodes deletes nodes from this node group. An error is
+// returned either on failure or if the given node doesn't belong to
+// this node group. This function should wait until node group size is
+// updated. Implementation required.
+func (ng *nodegroup) DeleteNodes(nodes []*corev1.Node) error {
+	// Step 1: Verify all nodes belong to this node group.
+	for _, node := range nodes {
+		actualNodeGroup, err := ng.machineController.nodeGroupForNode(node)
+		if err != nil {
+			return err
+		}
+
+		if actualNodeGroup == nil {
+			return fmt.Errorf("node %q doesn't belong to node group %q", node.Spec.ProviderID, ng.Id())
+		}
+
+		if actualNodeGroup.Id() != ng.Id() {
+			return fmt.Errorf("node %q doesn't belong to node group %q", node.Spec.ProviderID, ng.Id())
+		}
+	}
+
+	// Step 2: if deleting len(nodes) would make the replica count
+	// <= 0, then the request to delete that many nodes is bogus
+	// and we fail fast.
+	replicas := ng.scalableResource.Replicas()
+
+	if replicas-int32(len(nodes)) <= 0 {
+		return fmt.Errorf("unable to delete %d machines in %q, machine replicas are <= 0", len(nodes), ng.Id())
+	}
+
+	// Step 3: annotate the corresponding machine that it is a
+	// suitable candidate for deletion and drop the replica count
+	// by 1. Fail fast on any error.
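+	// The delete-machine annotation is only a hint: the actual
+	// scale down happens when SetSize() lowers the replica count,
+	// and the machine controller owning the resource is expected
+	// to prefer annotated machines when choosing replicas to
+	// remove.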
+	for _, node := range nodes {
+		machine, err := ng.machineController.findMachineByProviderID(node.Spec.ProviderID)
+		if err != nil {
+			return err
+		}
+		if machine == nil {
+			return fmt.Errorf("unknown machine for node %q", node.Spec.ProviderID)
+		}
+
+		machine = machine.DeepCopy()
+
+		// A machine that is already being deleted (it has a
+		// DeletionTimestamp) must not be counted a second
+		// time; the replica count was already decremented when
+		// it was first marked.
+		if !machine.ObjectMeta.DeletionTimestamp.IsZero() {
+			continue
+		}
+
+		if machine.Annotations == nil {
+			machine.Annotations = map[string]string{}
+		}
+
+		nodeGroup, err := ng.machineController.nodeGroupForNode(node)
+		if err != nil {
+			return err
+		}
+
+		if err := nodeGroup.scalableResource.MarkMachineForDeletion(machine); err != nil {
+			return err
+		}
+
+		if err := ng.scalableResource.SetSize(replicas - 1); err != nil {
+			return err
+		}
+
+		replicas--
+	}
+
+	return nil
+}
+
+// DecreaseTargetSize decreases the target size of the node group.
+// This function doesn't permit deleting any existing node and can be
+// used only to reduce the request for new nodes that have not yet
+// been fulfilled. Delta should be negative. It is assumed that the
+// cloud provider will not delete the existing nodes when there is an
+// option to just decrease the target. Implementation required.
+func (ng *nodegroup) DecreaseTargetSize(delta int) error {
+	if delta >= 0 {
+		return fmt.Errorf("size decrease must be negative")
+	}
+
+	size, err := ng.TargetSize()
+	if err != nil {
+		return err
+	}
+
+	nodes, err := ng.Nodes()
+	if err != nil {
+		return err
+	}
+
+	if size+delta < len(nodes) {
+		return fmt.Errorf("attempt to delete existing nodes targetSize:%d delta:%d existingNodes: %d",
+			size, delta, len(nodes))
+	}
+
+	return ng.scalableResource.SetSize(int32(size + delta))
+}
+
+// Id returns a unique identifier of the node group.
+func (ng *nodegroup) Id() string {
+	return ng.scalableResource.ID()
+}
+
+// Debug returns a string containing all information regarding this node group.
+func (ng *nodegroup) Debug() string {
+	return fmt.Sprintf(debugFormat, ng.Id(), ng.MinSize(), ng.MaxSize(), ng.scalableResource.Replicas())
+}
+
+// Nodes returns a list of all nodes that belong to this node group.
+func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) {
+	nodes, err := ng.scalableResource.Nodes()
+	if err != nil {
+		return nil, err
+	}
+
+	instances := make([]cloudprovider.Instance, len(nodes))
+	for i := range nodes {
+		instances[i] = cloudprovider.Instance{
+			Id: nodes[i],
+		}
+	}
+
+	return instances, nil
+}
+
+// TemplateNodeInfo returns a schedulernodeinfo.NodeInfo structure of an
+// empty (as if just started) node. This will be used in scale-up
+// simulations to predict what a new node would look like if the node
+// group was expanded. The returned NodeInfo is expected to have a
+// fully populated Node object, with all of the labels, capacity and
+// allocatable information as well as all pods that are started on the
+// node by default, using manifests (most likely only kube-proxy).
+// Implementation optional.
+func (ng *nodegroup) TemplateNodeInfo() (*schedulernodeinfo.NodeInfo, error) {
+	return nil, cloudprovider.ErrNotImplemented
+}
+
+// Exist checks if the node group really exists on the cloud provider
+// side. This allows distinguishing a theoretical node group from a
+// real one. Implementation required.
+func (ng *nodegroup) Exist() bool {
+	return true
+}
+
+// Create creates the node group on the cloud provider side.
+// Implementation optional.
+func (ng *nodegroup) Create() (cloudprovider.NodeGroup, error) {
+	return nil, cloudprovider.ErrAlreadyExist
+}
+
+// Delete deletes the node group on the cloud provider side.
This will +// be executed only for autoprovisioned node groups, once their size +// drops to 0. Implementation optional. +func (ng *nodegroup) Delete() error { + return cloudprovider.ErrNotImplemented +} + +// Autoprovisioned returns true if the node group is autoprovisioned. +// An autoprovisioned group was created by CA and can be deleted when +// scaled to 0. +func (ng *nodegroup) Autoprovisioned() bool { + return false +} + +func newNodegroupFromMachineSet(controller *machineController, machineSet *MachineSet) (*nodegroup, error) { + scalableResource, err := newMachineSetScalableResource(controller, machineSet) + if err != nil { + return nil, err + } + return &nodegroup{ + machineController: controller, + scalableResource: scalableResource, + }, nil +} + +func newNodegroupFromMachineDeployment(controller *machineController, machineDeployment *MachineDeployment) (*nodegroup, error) { + scalableResource, err := newMachineDeploymentScalableResource(controller, machineDeployment) + if err != nil { + return nil, err + } + return &nodegroup{ + machineController: controller, + scalableResource: scalableResource, + }, nil +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go new file mode 100644 index 000000000000..7ed410f1dd9d --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go @@ -0,0 +1,846 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package clusterapi
+
+import (
+	"fmt"
+	"path"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/utils/pointer"
+)
+
+const (
+	testNamespace = "test-namespace"
+)
+
+func TestNodeGroupNewNodeGroupConstructor(t *testing.T) {
+	type testCase struct {
+		description string
+		annotations map[string]string
+		errors      bool
+		replicas    int32
+		minSize     int
+		maxSize     int
+		nodeCount   int
+	}
+
+	var testCases = []testCase{{
+		description: "errors because minSize is invalid",
+		annotations: map[string]string{
+			nodeGroupMinSizeAnnotationKey: "-1",
+			nodeGroupMaxSizeAnnotationKey: "0",
+		},
+		errors: true,
+	}, {
+		description: "errors because maxSize is invalid",
+		annotations: map[string]string{
+			nodeGroupMinSizeAnnotationKey: "0",
+			nodeGroupMaxSizeAnnotationKey: "-1",
+		},
+		errors: true,
+	}, {
+		description: "errors because minSize > maxSize",
+		annotations: map[string]string{
+			nodeGroupMinSizeAnnotationKey: "1",
+			nodeGroupMaxSizeAnnotationKey: "0",
+		},
+		errors: true,
+	}, {
+		description: "errors because maxSize < minSize",
+		annotations: map[string]string{
+			nodeGroupMinSizeAnnotationKey: "1",
+			nodeGroupMaxSizeAnnotationKey: "0",
+		},
+		errors: true,
+	}, {
+		description: "no error: min=0, max=0",
+		minSize:     0,
+		maxSize:     0,
+		replicas:    0,
+		errors:      false,
+	}, {
+		description: "no error: min=0, max=1",
+		annotations: map[string]string{
+			nodeGroupMaxSizeAnnotationKey: "1",
+		},
+		minSize:  0,
+		maxSize:  1,
+		replicas: 0,
+		errors:   false,
+	}, {
+		description: "no error: min=1, max=10, replicas=5",
+		annotations: map[string]string{
+			nodeGroupMinSizeAnnotationKey: "1",
+			nodeGroupMaxSizeAnnotationKey: "10",
+		},
+		minSize:   1,
+		maxSize:   10,
+		replicas:  5,
+		nodeCount: 5,
+		errors:    false,
+	}}
+
+	newNodeGroup := func(t *testing.T, controller *machineController, testConfig *testConfig) (*nodegroup, error) {
+		if testConfig.machineDeployment != nil {
+			return newNodegroupFromMachineDeployment(controller, testConfig.machineDeployment)
+		}
+		return newNodegroupFromMachineSet(controller, testConfig.machineSet)
+	}
+
+	test := func(t *testing.T, tc testCase, testConfig *testConfig) {
+		controller, stop := mustCreateTestController(t)
+		defer stop()
+
+		ng, err := newNodeGroup(t, controller, testConfig)
+		if tc.errors && err == nil {
+			t.Fatal("expected an error")
+		}
+
+		if !tc.errors && ng == nil {
+			t.Fatalf("test case logic error: %v", err)
+		}
+
+		if tc.errors {
+			// if the test case is expected to error then
+			// don't assert the remainder
+			return
+		}
+
+		if ng == nil {
+			t.Fatal("expected nodegroup to be non-nil")
+		}
+
+		var expectedName string
+
+		switch v := (ng.scalableResource).(type) {
+		case *machineSetScalableResource:
+			expectedName = testConfig.spec.machineSetName
+		case *machineDeploymentScalableResource:
+			expectedName = testConfig.spec.machineDeploymentName
+		default:
+			t.Fatalf("unexpected type: %T", v)
+		}
+
+		expectedID := path.Join(testConfig.spec.namespace, expectedName)
+		expectedDebug := fmt.Sprintf(debugFormat, expectedID, tc.minSize, tc.maxSize, tc.replicas)
+
+		if ng.Name() != expectedName {
+			t.Errorf("expected %q, got %q", expectedName, ng.Name())
+		}
+
+		if ng.Namespace() != testConfig.spec.namespace {
+			t.Errorf("expected %q, got %q", testConfig.spec.namespace, ng.Namespace())
+		}
+
+		if ng.MinSize() != tc.minSize {
+			t.Errorf("expected %v, got %v", tc.minSize, ng.MinSize())
+		}
+
+		if ng.MaxSize() != tc.maxSize {
+			t.Errorf("expected %v, got %v", tc.maxSize,
ng.MaxSize()) + } + + if ng.Id() != expectedID { + t.Errorf("expected %q, got %q", expectedID, ng.Id()) + } + + if ng.Debug() != expectedDebug { + t.Errorf("expected %q, got %q", expectedDebug, ng.Debug()) + } + + if _, err := ng.TemplateNodeInfo(); err != cloudprovider.ErrNotImplemented { + t.Error("expected error") + } + + if exists := ng.Exist(); !exists { + t.Errorf("expected %t, got %t", true, exists) + } + + if _, err := ng.Create(); err != cloudprovider.ErrAlreadyExist { + t.Error("expected error") + } + + if err := ng.Delete(); err != cloudprovider.ErrNotImplemented { + t.Error("expected error") + } + + if result := ng.Autoprovisioned(); result { + t.Errorf("expected %t, got %t", false, result) + } + + // We test ng.Nodes() in TestControllerNodeGroupsNodeCount + } + + t.Run("MachineSet", func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + test(t, tc, createMachineSetTestConfig(testNamespace, tc.nodeCount, tc.annotations)) + }) + } + }) + + t.Run("MachineDeployment", func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + test(t, tc, createMachineDeploymentTestConfig(testNamespace, tc.nodeCount, tc.annotations)) + }) + } + }) +} + +func TestNodeGroupIncreaseSizeErrors(t *testing.T) { + type testCase struct { + description string + delta int + initial int32 + errorMsg string + } + + testCases := []testCase{{ + description: "errors because delta is negative", + delta: -1, + initial: 3, + errorMsg: "size increase must be positive", + }, { + description: "errors because initial+delta > maxSize", + delta: 8, + initial: 3, + errorMsg: "size increase too large - desired:11 max:10", + }} + + test := func(t *testing.T, tc *testCase, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if l := len(nodegroups); l != 1 { + t.Fatalf("expected 1 nodegroup, got %d", l) + } + + ng := nodegroups[0] + currReplicas, err := ng.TargetSize() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if currReplicas != int(tc.initial) { + t.Errorf("expected %v, got %v", tc.initial, currReplicas) + } + + errors := len(tc.errorMsg) > 0 + + err = ng.IncreaseSize(tc.delta) + if errors && err == nil { + t.Fatal("expected an error") + } + + if !errors && err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(err.Error(), tc.errorMsg) { + t.Errorf("expected error message to contain %q, got %q", tc.errorMsg, err.Error()) + } + + switch v := (ng.scalableResource).(type) { + case *machineSetScalableResource: + // A nodegroup is immutable; get a fresh copy. + ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.initial { + t.Errorf("expected %v, got %v", tc.initial, actual) + } + case *machineDeploymentScalableResource: + // A nodegroup is immutable; get a fresh copy. 
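+			// (ng.scalableResource still holds the object as it
+			// was when the nodegroup was built, so assertions
+			// must go back to the store.)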
+ md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.initial { + t.Errorf("expected %v, got %v", tc.initial, actual) + } + default: + t.Errorf("unexpected type: %T", v) + } + } + + t.Run("MachineSet", func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + annotations := map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + } + test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + }) + } + }) + + t.Run("MachineDeployment", func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + annotations := map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + } + test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations)) + }) + } + }) +} + +func TestNodeGroupIncreaseSize(t *testing.T) { + type testCase struct { + description string + delta int + initial int32 + expected int32 + } + + test := func(t *testing.T, tc *testCase, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if l := len(nodegroups); l != 1 { + t.Fatalf("expected 1 nodegroup, got %d", l) + } + + ng := nodegroups[0] + currReplicas, err := ng.TargetSize() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if currReplicas != int(tc.initial) { + t.Errorf("initially expected %v, got %v", tc.initial, currReplicas) + } + + if err := ng.IncreaseSize(tc.delta); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + switch v := (ng.scalableResource).(type) { + case *machineSetScalableResource: + // A nodegroup is immutable; get a fresh copy. + ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.expected { + t.Errorf("expected %v, got %v", tc.expected, actual) + } + case *machineDeploymentScalableResource: + // A nodegroup is immutable; get a fresh copy. 
+			md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{})
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+			if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.expected {
+				t.Errorf("expected %v, got %v", tc.expected, actual)
+			}
+		default:
+			t.Errorf("unexpected type: %T", v)
+		}
+	}
+
+	annotations := map[string]string{
+		nodeGroupMinSizeAnnotationKey: "1",
+		nodeGroupMaxSizeAnnotationKey: "10",
+	}
+
+	t.Run("MachineSet", func(t *testing.T) {
+		tc := testCase{
+			description: "increase by 1",
+			initial:     3,
+			expected:    4,
+			delta:       1,
+		}
+		test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations))
+	})
+
+	t.Run("MachineDeployment", func(t *testing.T) {
+		tc := testCase{
+			description: "increase by 1",
+			initial:     3,
+			expected:    4,
+			delta:       1,
+		}
+		test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations))
+	})
+}
+
+func TestNodeGroupDecreaseTargetSize(t *testing.T) {
+	type testCase struct {
+		description string
+		delta       int
+		initial     int32
+		expected    int32
+	}
+
+	test := func(t *testing.T, tc *testCase, testConfig *testConfig) {
+		controller, stop := mustCreateTestController(t, testConfig)
+		defer stop()
+
+		nodegroups, err := controller.nodeGroups()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if l := len(nodegroups); l != 1 {
+			t.Fatalf("expected 1 nodegroup, got %d", l)
+		}
+
+		ng := nodegroups[0]
+		currReplicas, err := ng.TargetSize()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if currReplicas != int(tc.initial) {
+			t.Errorf("initially expected %v, got %v", tc.initial, currReplicas)
+		}
+
+		// A target size decrease is only valid when the target
+		// exceeds the number of existing nodes. Simulate a node
+		// that never registered by removing one node object
+		// from the informer's store.
+		if err := controller.nodeInformer.GetStore().Delete(testConfig.nodes[0]); err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if err := ng.DecreaseTargetSize(tc.delta); err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		switch v := (ng.scalableResource).(type) {
+		case *machineSetScalableResource:
+			// A nodegroup is immutable; get a fresh copy.
+			ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{})
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+			if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.expected {
+				t.Errorf("expected %v, got %v", tc.expected, actual)
+			}
+		case *machineDeploymentScalableResource:
+			// A nodegroup is immutable; get a fresh copy.
+ md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.expected { + t.Errorf("expected %v, got %v", tc.expected, actual) + } + default: + t.Errorf("unexpected type: %T", v) + } + } + + annotations := map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + } + + t.Run("MachineSet", func(t *testing.T) { + tc := testCase{ + description: "decrease by 1", + initial: 3, + expected: 2, + delta: -1, + } + test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + tc := testCase{ + description: "decrease by 1", + initial: 3, + expected: 2, + delta: -1, + } + test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations)) + }) +} + +func TestNodeGroupDecreaseSizeErrors(t *testing.T) { + type testCase struct { + description string + delta int + initial int32 + errorMsg string + } + + testCases := []testCase{{ + description: "errors because delta is positive", + delta: 1, + initial: 3, + errorMsg: "size decrease must be negative", + }, { + description: "errors because initial+delta < len(nodes)", + delta: -1, + initial: 3, + errorMsg: "attempt to delete existing nodes targetSize:3 delta:-1 existingNodes: 3", + }} + + test := func(t *testing.T, tc *testCase, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if l := len(nodegroups); l != 1 { + t.Fatalf("expected 1 nodegroup, got %d", l) + } + + ng := nodegroups[0] + currReplicas, err := ng.TargetSize() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if currReplicas != int(tc.initial) { + t.Errorf("expected %v, got %v", tc.initial, currReplicas) + } + + errors := len(tc.errorMsg) > 0 + + err = ng.DecreaseTargetSize(tc.delta) + if errors && err == nil { + t.Fatal("expected an error") + } + + if !errors && err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(err.Error(), tc.errorMsg) { + t.Errorf("expected error message to contain %q, got %q", tc.errorMsg, err.Error()) + } + + switch v := (ng.scalableResource).(type) { + case *machineSetScalableResource: + // A nodegroup is immutable; get a fresh copy. + ms, err := ng.machineController.getMachineSet(ng.Namespace(), ng.Name(), v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(ms.Spec.Replicas, 0); actual != tc.initial { + t.Errorf("expected %v, got %v", tc.initial, actual) + } + case *machineDeploymentScalableResource: + // A nodegroup is immutable; get a fresh copy. 
+ md, err := ng.machineController.getMachineDeployment(ng.Namespace(), ng.Name(), v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(md.Spec.Replicas, 0); actual != tc.initial { + t.Errorf("expected %v, got %v", tc.initial, actual) + } + default: + t.Errorf("unexpected type: %T", v) + } + } + + t.Run("MachineSet", func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + annotations := map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + } + test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations)) + }) + } + }) + + t.Run("MachineDeployment", func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + annotations := map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + } + test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations)) + }) + } + }) +} + +func TestNodeGroupDeleteNodes(t *testing.T) { + test := func(t *testing.T, testConfig *testConfig) { + controller, stop := mustCreateTestController(t, testConfig) + defer stop() + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if l := len(nodegroups); l != 1 { + t.Fatalf("expected 1 nodegroup, got %d", l) + } + + ng := nodegroups[0] + nodeNames, err := ng.Nodes() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if len(nodeNames) != len(testConfig.nodes) { + t.Fatalf("expected len=%v, got len=%v", len(testConfig.nodes), len(nodeNames)) + } + + sort.SliceStable(nodeNames, func(i, j int) bool { + return nodeNames[i].Id < nodeNames[j].Id + }) + + for i := 0; i < len(nodeNames); i++ { + if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID { + t.Fatalf("expected %q, got %q", testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id) + } + } + + if err := ng.DeleteNodes(testConfig.nodes[5:]); err != nil { + t.Errorf("unexpected error: %v", err) + } + + for i := 5; i < len(testConfig.machines); i++ { + machine, err := controller.getMachine(testConfig.machines[i].Namespace, testConfig.machines[i].Name, v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, found := machine.Annotations[machineDeleteAnnotationKey]; !found { + t.Errorf("expected annotation %q on machine %s", machineDeleteAnnotationKey, machine.Name) + } + } + + switch v := (ng.scalableResource).(type) { + case *machineSetScalableResource: + updatedMachineSet, err := controller.getMachineSet(testConfig.machineSet.Namespace, testConfig.machineSet.Name, v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(updatedMachineSet.Spec.Replicas, 0); actual != 5 { + t.Fatalf("expected 5 nodes, got %v", actual) + } + case *machineDeploymentScalableResource: + updatedMachineDeployment, err := controller.getMachineDeployment(testConfig.machineDeployment.Namespace, testConfig.machineDeployment.Name, v1.GetOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if actual := pointer.Int32PtrDerefOr(updatedMachineDeployment.Spec.Replicas, 0); actual != 5 { + t.Fatalf("expected 5 nodes, got %v", actual) + } + default: + t.Errorf("unexpected type: %T", v) + } + } + + // Note: 10 is an upper bound for the number of nodes/replicas + // Going beyond 10 will break the sorting that happens in the + // test() function because 
sort.Strings() will not do natural + // sorting and the expected semantics in test() will fail. + + t.Run("MachineSet", func(t *testing.T) { + test(t, createMachineSetTestConfig(testNamespace, 10, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + })) + }) + + t.Run("MachineDeployment", func(t *testing.T) { + test(t, createMachineDeploymentTestConfig(testNamespace, 10, map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "10", + })) + }) +} + +func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { + test := func(t *testing.T, expected int, testConfigs []*testConfig) { + t.Helper() + testConfig0, testConfig1 := testConfigs[0], testConfigs[1] + controller, stop := mustCreateTestController(t, testConfigs...) + defer stop() + + nodegroups, err := controller.nodeGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if l := len(nodegroups); l != expected { + t.Fatalf("expected %d, got %d", expected, l) + } + + ng0, err := controller.nodeGroupForNode(testConfig0.nodes[0]) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + ng1, err := controller.nodeGroupForNode(testConfig1.nodes[0]) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Deleting nodes that are not in ng0 should fail. + err0 := ng0.DeleteNodes(testConfig1.nodes) + if err0 == nil { + t.Error("expected an error") + } + + expectedErr0 := `node "test-namespace1-machineset-0-nodeid-0" doesn't belong to node group "test-namespace0/machineset-0"` + if testConfig0.machineDeployment != nil { + expectedErr0 = `node "test-namespace1-machineset-0-nodeid-0" doesn't belong to node group "test-namespace0/machinedeployment-0"` + } + + if !strings.Contains(err0.Error(), expectedErr0) { + t.Errorf("expected: %q, got: %q", expectedErr0, err0.Error()) + } + + // Deleting nodes that are not in ng1 should fail. + err1 := ng1.DeleteNodes(testConfig0.nodes) + if err1 == nil { + t.Error("expected an error") + } + + expectedErr1 := `node "test-namespace0-machineset-0-nodeid-0" doesn't belong to node group "test-namespace1/machineset-0"` + if testConfig1.machineDeployment != nil { + expectedErr1 = `node "test-namespace0-machineset-0-nodeid-0" doesn't belong to node group "test-namespace1/machinedeployment-0"` + } + + if !strings.Contains(err1.Error(), expectedErr1) { + t.Errorf("expected: %q, got: %q", expectedErr1, err1.Error()) + } + + // Deleting from correct node group should fail because + // replicas would become <= 0. + if err := ng0.DeleteNodes(testConfig0.nodes); err == nil { + t.Error("expected error") + } + + // Deleting from correct node group should fail because + // replicas would become <= 0. 
+		if err := ng1.DeleteNodes(testConfig1.nodes); err == nil {
+			t.Error("expected error")
+		}
+	}
+
+	annotations := map[string]string{
+		nodeGroupMinSizeAnnotationKey: "1",
+		nodeGroupMaxSizeAnnotationKey: "3",
+	}
+
+	t.Run("MachineSet", func(t *testing.T) {
+		testConfig0 := createMachineSetTestConfigs(testNamespace+"0", 1, 2, annotations)
+		testConfig1 := createMachineSetTestConfigs(testNamespace+"1", 1, 2, annotations)
+		test(t, 2, append(testConfig0, testConfig1...))
+	})
+
+	t.Run("MachineDeployment", func(t *testing.T) {
+		testConfig0 := createMachineDeploymentTestConfigs(testNamespace+"0", 1, 2, annotations)
+		testConfig1 := createMachineDeploymentTestConfigs(testNamespace+"1", 1, 2, annotations)
+		test(t, 2, append(testConfig0, testConfig1...))
+	})
+}
+
+func TestNodeGroupDeleteNodesTwice(t *testing.T) {
+	addDeletionTimestamp := func(t *testing.T, controller *machineController, machine *Machine) error {
+		// Simulate the delete that would have happened if the
+		// Machine API controllers were running. Don't actually
+		// delete since the fake client does not support
+		// finalizers.
+		now := v1.Now()
+		machine.DeletionTimestamp = &now
+		return controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine))
+	}
+
+	test := func(t *testing.T, testConfig *testConfig) {
+		controller, stop := mustCreateTestController(t, testConfig)
+		defer stop()
+
+		nodegroups, err := controller.nodeGroups()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if l := len(nodegroups); l != 1 {
+			t.Fatalf("expected 1 nodegroup, got %d", l)
+		}
+
+		ng := nodegroups[0]
+
+		// Assert that we have no DeletionTimestamp
+		for i := 7; i < len(testConfig.machines); i++ {
+			if !testConfig.machines[i].ObjectMeta.DeletionTimestamp.IsZero() {
+				t.Fatalf("unexpected DeletionTimestamp")
+			}
+		}
+
+		if err := ng.DeleteNodes(testConfig.nodes[7:]); err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		for i := 7; i < len(testConfig.machines); i++ {
+			if err := addDeletionTimestamp(t, controller, testConfig.machines[i]); err != nil {
+				t.Fatalf("unexpected err: %v", err)
+			}
+			if testConfig.machines[i].ObjectMeta.DeletionTimestamp.IsZero() {
+				t.Fatalf("expected a DeletionTimestamp")
+			}
+		}
+
+		// TODO(frobware) We have a flaky test here because we
+		// just called Delete and Update and the next call to
+		// controller.nodeGroups() will sometimes get stale
+		// objects from the (fakeclient) store. To fix this we
+		// should update the test machinery so that individual
+		// tests can have callbacks on Add/Update/Delete on
+		// each of the respective informers. We should then
+		// override those callbacks here in this test to add
+		// rendezvous points so that we wait until all objects
+		// have been updated before we go and get them again.
+		//
+		// Running this test with a 500ms duration I see:
+		//
+		// $ ./stress ./clusterapi.test -test.run TestNodeGroupDeleteNodesTwice -test.count 5 | ts | ts -i
+		// 00:00:05 Feb 27 14:29:36 0 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:29:41 8 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:29:46 16 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:29:51 24 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:29:56 32 runs so far, 0 failures
+		// ...
+		// 00:00:05 Feb 27 14:31:01 112 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:31:06 120 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:31:11 128 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:31:16 136 runs so far, 0 failures
+		// 00:00:05 Feb 27 14:31:21 144 runs so far, 0 failures
+		//
+		// To make sure we don't run into any flakes in CI
+		// I've chosen to make this sleep duration 3s.
+		time.Sleep(3 * time.Second)
+
+		nodegroups, err = controller.nodeGroups()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		ng = nodegroups[0]
+
+		// Attempt to delete the nodes again which verifies
+		// that nodegroup.DeleteNodes() skips over nodes that
+		// have a non-nil DeletionTimestamp value.
+		if err := ng.DeleteNodes(testConfig.nodes[7:]); err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		actualSize, err := ng.TargetSize()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		expectedSize := len(testConfig.machines) - len(testConfig.machines[7:])
+		if actualSize != expectedSize {
+			t.Fatalf("expected %d nodes, got %d", expectedSize, actualSize)
+		}
+	}
+
+	annotations := map[string]string{
+		nodeGroupMinSizeAnnotationKey: "1",
+		nodeGroupMaxSizeAnnotationKey: "10",
+	}
+
+	t.Run("MachineSet", func(t *testing.T) {
+		test(t, createMachineSetTestConfig(testNamespace, 10, annotations))
+	})
+
+	t.Run("MachineDeployment", func(t *testing.T) {
+		test(t, createMachineDeploymentTestConfig(testNamespace, 10, annotations))
+	})
+}
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go
new file mode 100644
index 000000000000..48824fc55913
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clusterapi
+
+import (
+	"reflect"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/config"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog"
+)
+
+const (
+	// ProviderName is the name of cluster-api cloud provider.
+	ProviderName = "clusterapi"
+
+	// GPULabel is the label added to nodes with GPU resource.
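+	// The label is expected to be applied by the cluster operator
+	// or infrastructure tooling; nothing in this provider sets it.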
+ GPULabel = "cluster-api/accelerator" +) + +var _ cloudprovider.CloudProvider = (*provider)(nil) + +type provider struct { + controller *machineController + providerName string + resourceLimiter *cloudprovider.ResourceLimiter +} + +func (p *provider) Name() string { + return p.providerName +} + +func (p *provider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) { + return p.resourceLimiter, nil +} + +func (p *provider) NodeGroups() []cloudprovider.NodeGroup { + var result []cloudprovider.NodeGroup + nodegroups, err := p.controller.nodeGroups() + if err != nil { + klog.Errorf("error getting node groups: %v", err) + return nil + } + for _, ng := range nodegroups { + klog.V(4).Infof("discovered node group: %s", ng.Debug()) + result = append(result, ng) + } + return result +} + +func (p *provider) NodeGroupForNode(node *corev1.Node) (cloudprovider.NodeGroup, error) { + ng, err := p.controller.nodeGroupForNode(node) + if err != nil { + return nil, err + } + if ng == nil || reflect.ValueOf(ng).IsNil() { + return nil, nil + } + return ng, nil +} + +func (*provider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { + return nil, cloudprovider.ErrNotImplemented +} + +func (*provider) GetAvailableMachineTypes() ([]string, error) { + return []string{}, nil +} + +func (*provider) NewNodeGroup( + machineType string, + labels map[string]string, + systemLabels map[string]string, + taints []corev1.Taint, + extraResources map[string]resource.Quantity, +) (cloudprovider.NodeGroup, error) { + return nil, cloudprovider.ErrNotImplemented +} + +func (*provider) Cleanup() error { + return nil +} + +func (p *provider) Refresh() error { + return nil +} + +// GetInstanceID gets the instance ID for the specified node. +func (p *provider) GetInstanceID(node *corev1.Node) string { + return node.Spec.ProviderID +} + +// GetAvailableGPUTypes return all available GPU types cloud provider supports. +func (p *provider) GetAvailableGPUTypes() map[string]struct{} { + // TODO: implement this + return nil +} + +// GPULabel returns the label added to nodes with GPU resource. +func (p *provider) GPULabel() string { + return GPULabel +} + +func newProvider( + name string, + rl *cloudprovider.ResourceLimiter, + controller *machineController, +) (cloudprovider.CloudProvider, error) { + return &provider{ + providerName: name, + resourceLimiter: rl, + controller: controller, + }, nil +} + +// BuildClusterAPI builds CloudProvider implementation for machine api. +func BuildClusterAPI(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + externalConfig, err := clientcmd.BuildConfigFromFlags("", opts.KubeConfigPath) + if err != nil { + klog.Fatalf("cannot build config: %v", err) + } + + // Grab a dynamic interface that we can create informers from + dc, err := dynamic.NewForConfig(externalConfig) + if err != nil { + klog.Fatalf("could not generate dynamic client for config") + } + + kubeclient, err := kubernetes.NewForConfig(externalConfig) + if err != nil { + klog.Fatalf("create kube clientset failed: %v", err) + } + + controller, err := newMachineController(dc, kubeclient) + if err != nil { + klog.Fatal(err) + } + + // Ideally this would be passed in but the builder is not + // currently organised to do so. 
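+	// The stop channel is never closed: the informers started by
+	// the controller are expected to run for the remaining
+	// lifetime of the autoscaler process.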
+	stopCh := make(chan struct{})
+
+	if err := controller.run(stopCh); err != nil {
+		klog.Fatal(err)
+	}
+
+	provider, err := newProvider(ProviderName, rl, controller)
+	if err != nil {
+		klog.Fatal(err)
+	}
+
+	return provider
+}
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go
new file mode 100644
index 000000000000..6ba2774c2cb7
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_provider_test.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clusterapi
+
+import (
+	"reflect"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+)
+
+func TestProviderConstructorProperties(t *testing.T) {
+	resourceLimits := cloudprovider.ResourceLimiter{}
+
+	controller, stop := mustCreateTestController(t)
+	defer stop()
+
+	provider, err := newProvider(ProviderName, &resourceLimits, controller)
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	if actual := provider.Name(); actual != ProviderName {
+		t.Errorf("expected %q, got %q", ProviderName, actual)
+	}
+
+	rl, err := provider.GetResourceLimiter()
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	if !reflect.DeepEqual(rl, &resourceLimits) {
+		t.Errorf("expected %+v, got %+v", resourceLimits, rl)
+	}
+
+	if _, err := provider.Pricing(); err != cloudprovider.ErrNotImplemented {
+		t.Errorf("expected an error")
+	}
+
+	machineTypes, err := provider.GetAvailableMachineTypes()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	if len(machineTypes) != 0 {
+		t.Errorf("expected 0, got %v", len(machineTypes))
+	}
+
+	if _, err := provider.NewNodeGroup("foo", nil, nil, nil, nil); err == nil {
+		t.Error("expected an error")
+	}
+
+	if err := provider.Cleanup(); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	if err := provider.Refresh(); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	nodegroups := provider.NodeGroups()
+
+	if len(nodegroups) != 0 {
+		t.Errorf("expected 0, got %v", len(nodegroups))
+	}
+
+	ng, err := provider.NodeGroupForNode(&corev1.Node{
+		TypeMeta: v1.TypeMeta{
+			Kind: "Node",
+		},
+		ObjectMeta: v1.ObjectMeta{
+			Name: "missing-node",
+		},
+	})
+
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	if ng != nil {
+		t.Fatalf("unexpected nodegroup: %v", ng.Id())
+	}
+
+	if got := provider.GPULabel(); got != GPULabel {
+		t.Fatalf("expected %q, got %q", GPULabel, got)
+	}
+
+	if got := len(provider.GetAvailableGPUTypes()); got != 0 {
+		t.Fatalf("expected 0 GPU types, got %d", got)
+	}
+}
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go
new file mode 100644
index 000000000000..3a4aa02161ce
--- /dev/null
+++ 
b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go @@ -0,0 +1,49 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +// scalableResource is a resource that can be scaled up and down by +// adjusting its replica count field. +type scalableResource interface { + // Id returns an unique identifier of the resource + ID() string + + // MaxSize returns maximum size of the resource + MaxSize() int + + // MinSize returns minimum size of the resource + MinSize() int + + // Name returns the name of the resource + Name() string + + // Namespace returns the namespace the resource is in + Namespace() string + + // Nodes returns a list of all nodes that belong to this + // resource + Nodes() ([]string, error) + + // SetSize() sets the replica count of the resource + SetSize(nreplicas int32) error + + // Replicas returns the current replica count of the resource + Replicas() int32 + + // MarkMachineForDeletion marks machine for deletion + MarkMachineForDeletion(machine *Machine) error +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go new file mode 100644 index 000000000000..9f4246c36914 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go @@ -0,0 +1,145 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + "strconv" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + nodeGroupMinSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-min-size" + nodeGroupMaxSizeAnnotationKey = "cluster.k8s.io/cluster-api-autoscaler-node-group-max-size" +) + +var ( + // errMissingMinAnnotation is the error returned when a + // machine set does not have an annotation keyed by + // nodeGroupMinSizeAnnotationKey. + errMissingMinAnnotation = errors.New("missing min annotation") + + // errMissingMaxAnnotation is the error returned when a + // machine set does not have an annotation keyed by + // nodeGroupMaxSizeAnnotationKey. + errMissingMaxAnnotation = errors.New("missing max annotation") + + // errInvalidMinAnnotationValue is the error returned when a + // machine set has a non-integral min annotation value. + errInvalidMinAnnotation = errors.New("invalid min annotation") + + // errInvalidMaxAnnotationValue is the error returned when a + // machine set has a non-integral max annotation value. 
+ errInvalidMaxAnnotation = errors.New("invalid max annotation") +) + +// minSize returns the minimum value encoded in the annotations keyed +// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation +// if the annotation doesn't exist or errInvalidMinAnnotation if the +// value is not of type int. +func minSize(annotations map[string]string) (int, error) { + val, found := annotations[nodeGroupMinSizeAnnotationKey] + if !found { + return 0, errMissingMinAnnotation + } + i, err := strconv.Atoi(val) + if err != nil { + return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation) + } + return i, nil +} + +// maxSize returns the maximum value encoded in the annotations keyed +// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation +// if the annotation doesn't exist or errInvalidMaxAnnotation if the +// value is not of type int. +func maxSize(annotations map[string]string) (int, error) { + val, found := annotations[nodeGroupMaxSizeAnnotationKey] + if !found { + return 0, errMissingMaxAnnotation + } + i, err := strconv.Atoi(val) + if err != nil { + return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation) + } + return i, nil +} + +func parseScalingBounds(annotations map[string]string) (int, int, error) { + minSize, err := minSize(annotations) + if err != nil && err != errMissingMinAnnotation { + return 0, 0, err + } + + if minSize < 0 { + return 0, 0, errInvalidMinAnnotation + } + + maxSize, err := maxSize(annotations) + if err != nil && err != errMissingMaxAnnotation { + return 0, 0, err + } + + if maxSize < 0 { + return 0, 0, errInvalidMaxAnnotation + } + + if maxSize < minSize { + return 0, 0, errInvalidMaxAnnotation + } + + return minSize, maxSize, nil +} + +func machineOwnerRef(machine *Machine) *metav1.OwnerReference { + for _, ref := range machine.OwnerReferences { + if ref.Kind == "MachineSet" && ref.Name != "" { + return ref.DeepCopy() + } + } + + return nil +} + +func machineIsOwnedByMachineSet(machine *Machine, machineSet *MachineSet) bool { + if ref := machineOwnerRef(machine); ref != nil { + return ref.UID == machineSet.UID + } + return false +} + +func machineSetMachineDeploymentRef(machineSet *MachineSet) *metav1.OwnerReference { + for _, ref := range machineSet.OwnerReferences { + if ref.Kind == "MachineDeployment" { + return ref.DeepCopy() + } + } + + return nil +} + +func machineSetHasMachineDeploymentOwnerRef(machineSet *MachineSet) bool { + return machineSetMachineDeploymentRef(machineSet) != nil +} + +func machineSetIsOwnedByMachineDeployment(machineSet *MachineSet, machineDeployment *MachineDeployment) bool { + if ref := machineSetMachineDeploymentRef(machineSet); ref != nil { + return ref.UID == machineDeployment.UID + } + return false +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go new file mode 100644 index 000000000000..2aefc435d2e9 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go @@ -0,0 +1,371 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + "strings" + "testing" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + uuid1 = "ec21c5fb-a3d5-a45f-887b-6b49aa8fc218" + uuid2 = "ec23ebb0-bc60-443f-d139-046ec5046283" +) + +func TestUtilParseScalingBounds(t *testing.T) { + for i, tc := range []struct { + description string + annotations map[string]string + error error + min int + max int + }{{ + description: "missing min annotation defaults to 0 and no error", + annotations: map[string]string{ + nodeGroupMaxSizeAnnotationKey: "0", + }, + }, { + description: "missing max annotation defaults to 0 and no error", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "0", + }, + }, { + description: "invalid min errors", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "-1", + nodeGroupMaxSizeAnnotationKey: "0", + }, + error: errInvalidMinAnnotation, + }, { + description: "invalid min errors", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "not-an-int", + nodeGroupMaxSizeAnnotationKey: "0", + }, + error: errInvalidMinAnnotation, + }, { + description: "invalid max errors", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "0", + nodeGroupMaxSizeAnnotationKey: "-1", + }, + error: errInvalidMaxAnnotation, + }, { + description: "invalid max errors", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "0", + nodeGroupMaxSizeAnnotationKey: "not-an-int", + }, + error: errInvalidMaxAnnotation, + }, { + description: "negative min errors", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "-1", + nodeGroupMaxSizeAnnotationKey: "0", + }, + error: errInvalidMinAnnotation, + }, { + description: "negative max errors", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "0", + nodeGroupMaxSizeAnnotationKey: "-1", + }, + error: errInvalidMaxAnnotation, + }, { + description: "max < min errors", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "1", + nodeGroupMaxSizeAnnotationKey: "0", + }, + error: errInvalidMaxAnnotation, + }, { + description: "result is: min 0, max 0", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "0", + nodeGroupMaxSizeAnnotationKey: "0", + }, + min: 0, + max: 0, + }, { + description: "result is min 0, max 1", + annotations: map[string]string{ + nodeGroupMinSizeAnnotationKey: "0", + nodeGroupMaxSizeAnnotationKey: "1", + }, + min: 0, + max: 1, + }} { + t.Run(tc.description, func(t *testing.T) { + machineSet := MachineSet{ + ObjectMeta: v1.ObjectMeta{ + Annotations: tc.annotations, + }, + } + + min, max, err := parseScalingBounds(machineSet.Annotations) + if tc.error != nil && err == nil { + t.Fatalf("test #%d: expected an error", i) + } + + if tc.error != nil && tc.error != err { + if !strings.HasPrefix(err.Error(), tc.error.Error()) { + t.Errorf("expected message to have prefix %q, got %q", tc.error.Error(), err) + } + } + + if tc.error == nil { + if tc.min != min { + t.Errorf("expected min %d, got %d", tc.min, min) + } + if tc.max != max { + t.Errorf("expected max %d, got %d", tc.max, max) + } + } + }) + } +} + +func TestUtilMachineSetIsOwnedByMachineDeployment(t *testing.T) { + for _, tc := range []struct { + description string + machineSet MachineSet + machineDeployment MachineDeployment + owned bool + }{{ + description: "not owned as no owner references", + machineSet: MachineSet{}, + machineDeployment: MachineDeployment{}, + owned: 
false, + }, { + description: "not owned as not the same Kind", + machineSet: MachineSet{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "Other", + }}, + }, + }, + machineDeployment: MachineDeployment{}, + owned: false, + }, { + description: "not owned because no OwnerReference.Name", + machineSet: MachineSet{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "MachineSet", + UID: uuid1, + }}, + }, + }, + machineDeployment: MachineDeployment{ + ObjectMeta: v1.ObjectMeta{ + UID: uuid1, + }, + }, + owned: false, + }, { + description: "not owned as UID values don't match", + machineSet: MachineSet{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "MachineSet", + Name: "foo", + UID: uuid2, + }}, + }, + }, + machineDeployment: MachineDeployment{ + TypeMeta: v1.TypeMeta{ + Kind: "MachineDeployment", + }, + ObjectMeta: v1.ObjectMeta{ + UID: uuid1, + }, + }, + owned: false, + }, { + description: "owned as UID values match and same Kind and Name not empty", + machineSet: MachineSet{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "MachineDeployment", + Name: "foo", + UID: uuid1, + }}, + }, + }, + machineDeployment: MachineDeployment{ + TypeMeta: v1.TypeMeta{ + Kind: "MachineDeployment", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "foo", + UID: uuid1, + }, + }, + owned: true, + }} { + t.Run(tc.description, func(t *testing.T) { + owned := machineSetIsOwnedByMachineDeployment(&tc.machineSet, &tc.machineDeployment) + if tc.owned != owned { + t.Errorf("expected %t, got %t", tc.owned, owned) + } + }) + } +} + +func TestUtilMachineIsOwnedByMachineSet(t *testing.T) { + for _, tc := range []struct { + description string + machine Machine + machineSet MachineSet + owned bool + }{{ + description: "not owned as no owner references", + machine: Machine{}, + machineSet: MachineSet{}, + owned: false, + }, { + description: "not owned as not the same Kind", + machine: Machine{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "Other", + }}, + }, + }, + machineSet: MachineSet{}, + owned: false, + }, { + description: "not owned because no OwnerReference.Name", + machine: Machine{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "MachineSet", + UID: uuid1, + }}, + }, + }, + machineSet: MachineSet{ + ObjectMeta: v1.ObjectMeta{ + UID: uuid1, + }, + }, + owned: false, + }, { + description: "not owned as UID values don't match", + machine: Machine{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "MachineSet", + Name: "foo", + UID: uuid2, + }}, + }, + }, + machineSet: MachineSet{ + TypeMeta: v1.TypeMeta{ + Kind: "MachineSet", + }, + ObjectMeta: v1.ObjectMeta{ + UID: uuid1, + }, + }, + owned: false, + }, { + description: "owned as UID values match and same Kind and Name not empty", + machine: Machine{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "MachineSet", + Name: "foo", + UID: uuid1, + }}, + }, + }, + machineSet: MachineSet{ + TypeMeta: v1.TypeMeta{ + Kind: "MachineSet", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "foo", + UID: uuid1, + }, + }, + owned: true, + }} { + t.Run(tc.description, func(t *testing.T) { + owned := machineIsOwnedByMachineSet(&tc.machine, &tc.machineSet) + if tc.owned != owned { + t.Errorf("expected %t, got %t", tc.owned, owned) + } + }) + } +} + +func TestUtilMachineSetMachineDeploymentOwnerRef(t *testing.T) { + for _, tc := range []struct { + description string + 
machineSet MachineSet + machineDeployment MachineDeployment + owned bool + }{{ + description: "machineset not owned as no owner references", + machineSet: MachineSet{}, + machineDeployment: MachineDeployment{}, + owned: false, + }, { + description: "machineset not owned as ownerref not a MachineDeployment", + machineSet: MachineSet{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "Other", + }}, + }, + }, + machineDeployment: MachineDeployment{}, + owned: false, + }, { + description: "machineset owned as Kind matches and Name not empty", + machineSet: MachineSet{ + ObjectMeta: v1.ObjectMeta{ + OwnerReferences: []v1.OwnerReference{{ + Kind: "MachineDeployment", + Name: "foo", + }}, + }, + }, + machineDeployment: MachineDeployment{ + TypeMeta: v1.TypeMeta{ + Kind: "MachineDeployment", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "foo", + }, + }, + owned: true, + }} { + t.Run(tc.description, func(t *testing.T) { + owned := machineSetHasMachineDeploymentOwnerRef(&tc.machineSet) + if tc.owned != owned { + t.Errorf("expected %t, got %t", tc.owned, owned) + } + }) + } +} From f83d0dd8106a801a7c7162c1f6cb2b32c3c95e83 Mon Sep 17 00:00:00 2001 From: Andrew McDermott Date: Mon, 2 Mar 2020 12:21:24 +0000 Subject: [PATCH 04/10] cloudprovider/clusterapi: copy cluster-api v1alpha types These are copied to facilitate testing. They are not meant to reflect upstream clusterapi/v1alpha1 - in fact, fields have been removed. They are here to support the switch to unstructured types in the tests without having to rewrite all of the unit tests. --- .../cloudprovider/clusterapi/machine_types.go | 73 ++++ .../clusterapi/machinedeployment_types.go | 55 +++ .../clusterapi/machineset_types.go | 78 ++++ .../clusterapi/zz_generated.deepcopy.go | 355 ++++++++++++++++++ 4 files changed, 561 insertions(+) create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/machine_types.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go create mode 100644 cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go diff --git a/cluster-autoscaler/cloudprovider/clusterapi/machine_types.go b/cluster-autoscaler/cloudprovider/clusterapi/machine_types.go new file mode 100644 index 000000000000..4f4e968ac20f --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/machine_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Machine is the Schema for the machines API +type Machine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSpec `json:"spec,omitempty"` + Status MachineStatus `json:"status,omitempty"` +} + +// MachineSpec defines the desired state of Machine +type MachineSpec struct { + // ObjectMeta will autopopulate the Node created. 
Use this to
+	// indicate what labels, annotations, name prefix, etc., should be used
+	// when creating the Node.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Taints is the full, authoritative list of taints to apply to the corresponding
+	// Node. This list will overwrite any modifications made to the Node on
+	// an ongoing basis.
+	// +optional
+	Taints []corev1.Taint `json:"taints,omitempty"`
+
+	// ProviderID is the identification ID of the machine provided by the provider.
+	// This field must match the provider ID as seen on the node object corresponding to this machine.
+	// This field is required by higher level consumers of cluster-api. An example use case is the cluster autoscaler
+	// with cluster-api as provider. Clean-up logic in the autoscaler compares machines vs nodes to find out
+	// machines at the provider which could not get registered as Kubernetes nodes. With cluster-api as a
+	// generic out-of-tree provider for autoscaler, this field is required by the autoscaler to be
+	// able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver
+	// and then compared to find unregistered machines, which are marked for deletion.
+	// This field will be set by the actuators and consumed by higher level entities like the autoscaler, which will
+	// be interfacing with cluster-api as a generic provider.
+	// +optional
+	ProviderID *string `json:"providerID,omitempty"`
+}
+
+// MachineStatus defines the observed state of Machine
+type MachineStatus struct {
+	// NodeRef will point to the corresponding Node if it exists.
+	// +optional
+	NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"`
+}
+
+// MachineList contains a list of Machine
+type MachineList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Machine `json:"items"`
+}
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go b/cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go
new file mode 100644
index 000000000000..943cb3206923
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/clusterapi/machinedeployment_types.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clusterapi
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// MachineDeploymentSpec is the internal autoscaler Schema for MachineDeploymentSpec
+type MachineDeploymentSpec struct {
+	// Number of desired machines. Defaults to 1.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	// Label selector for machines. Existing MachineSets whose machines are
+	// selected by this will be the ones affected by this deployment.
+	// It must match the machine template's labels.
+	Selector metav1.LabelSelector `json:"selector"`
+
+	// Template describes the machines that will be created.
+ Template MachineTemplateSpec `json:"template"` +} + +// MachineDeploymentStatus is the internal autoscaler Schema for MachineDeploymentStatus +type MachineDeploymentStatus struct{} + +// MachineDeployment is the internal autoscaler Schema for MachineDeployment +type MachineDeployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineDeploymentSpec `json:"spec,omitempty"` + Status MachineDeploymentStatus `json:"status,omitempty"` +} + +// MachineDeploymentList is the internal autoscaler Schema for MachineDeploymentList +type MachineDeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineDeployment `json:"items"` +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go b/cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go new file mode 100644 index 000000000000..38232d7fa947 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/machineset_types.go @@ -0,0 +1,78 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterapi + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MachineSet is the internal autoscaler Schema for machineSets +type MachineSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSetSpec `json:"spec,omitempty"` + Status MachineSetStatus `json:"status,omitempty"` +} + +// MachineSetSpec is the internal autoscaler Schema for MachineSetSpec +type MachineSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. + // Defaults to 0 (machine will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + + // Selector is a label query over machines that should match the replica count. + // Label keys and values that must match in order to be controlled by this MachineSet. + // It must match the machine template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector metav1.LabelSelector `json:"selector"` + + // Template is the object that describes the machine that will be created if + // insufficient replicas are detected. + // +optional + Template MachineTemplateSpec `json:"template,omitempty"` +} + +// MachineTemplateSpec is the internal autoscaler Schema for MachineTemplateSpec +type MachineTemplateSpec struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired behavior of the machine. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec MachineSpec `json:"spec,omitempty"` +} + +// MachineSetStatus is the internal autoscaler Schema for MachineSetStatus +type MachineSetStatus struct{} + +// MachineSetList is the internal autoscaler Schema for MachineSetList +type MachineSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineSet `json:"items"` +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go b/cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go new file mode 100644 index 000000000000..9948f120e894 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/clusterapi/zz_generated.deepcopy.go @@ -0,0 +1,355 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package clusterapi + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Machine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. +func (in *MachineDeployment) DeepCopy() *MachineDeployment { + if in == nil { + return nil + } + out := new(MachineDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentList. +func (in *MachineDeploymentList) DeepCopy() *MachineDeploymentList { + if in == nil { + return nil + } + out := new(MachineDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentSpec. +func (in *MachineDeploymentSpec) DeepCopy() *MachineDeploymentSpec { + if in == nil { + return nil + } + out := new(MachineDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentStatus) DeepCopyInto(out *MachineDeploymentStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStatus. +func (in *MachineDeploymentStatus) DeepCopy() *MachineDeploymentStatus { + if in == nil { + return nil + } + out := new(MachineDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineList) DeepCopyInto(out *MachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Machine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. +func (in *MachineList) DeepCopy() *MachineList { + if in == nil { + return nil + } + out := new(MachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSet) DeepCopyInto(out *MachineSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. 
+func (in *MachineSet) DeepCopy() *MachineSet { + if in == nil { + return nil + } + out := new(MachineSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. +func (in *MachineSetList) DeepCopy() *MachineSetList { + if in == nil { + return nil + } + out := new(MachineSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. +func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { + if in == nil { + return nil + } + out := new(MachineSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. +func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { + if in == nil { + return nil + } + out := new(MachineSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. +func (in *MachineSpec) DeepCopy() *MachineSpec { + if in == nil { + return nil + } + out := new(MachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineStatus) DeepCopyInto(out *MachineStatus) {
+	*out = *in
+	if in.NodeRef != nil {
+		in, out := &in.NodeRef, &out.NodeRef
+		*out = new(v1.ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus.
+func (in *MachineStatus) DeepCopy() *MachineStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec.
+func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}

From 699c0b83b41965780fef265d8a0ad0b8dd879fd3 Mon Sep 17 00:00:00 2001
From: Enxebre
Date: Mon, 10 Feb 2020 15:19:07 +0100
Subject: [PATCH 05/10] Let Nodes() return the list of all machines

The autoscaler expects a provider implementation's node groups to
implement the Nodes() function so that it returns all instances
belonging to the group, regardless of whether they have become a
Kubernetes node or not. This information is then used, for instance,
to detect unregistered nodes:
https://github.com/kubernetes/autoscaler/blob/bf3a9fb52e3214dff0bea5ef2b97f17ad00a7702/cluster-autoscaler/clusterstate/clusterstate.go#L307-L311
---
 .../clusterapi/clusterapi_controller.go       | 26 +++---
 .../clusterapi_machinedeployment.go           |  4 +-
 .../clusterapi/clusterapi_machineset.go       |  2 +-
 .../clusterapi/clusterapi_nodegroup.go        |  1 +
 .../clusterapi/clusterapi_nodegroup_test.go   | 85 +++++++++++++++----
 .../clusterapi/clusterapi_scalableresource.go |  2 +-
 6 files changed, 83 insertions(+), 37 deletions(-)

diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
index 7ecb5ab5b6aa..585a81476143 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
@@ -336,25 +336,22 @@ func (c *machineController) machineSetProviderIDs(machineSet *MachineSet) ([]str
 		return nil, fmt.Errorf("error listing machines: %v", err)
 	}
 
-	var nodes []string
-
+	var providerIDs []string
 	for _, machine := range machines {
+		if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" {
+			klog.Warningf("Machine %q has no providerID", machine.Name)
+		}
+
 		if machine.Spec.ProviderID != nil && *machine.Spec.ProviderID != "" {
-			// Prefer machine<=>node mapping using ProviderID
-			node, err := c.findNodeByProviderID(*machine.Spec.ProviderID)
-			if err != nil {
-				return nil, err
-			}
-			if node != nil {
-				nodes = append(nodes, node.Spec.ProviderID)
-				continue
-			}
+			providerIDs = append(providerIDs, *machine.Spec.ProviderID)
+			continue
 		}
 
 		if machine.Status.NodeRef == nil {
 			klog.V(4).Infof("Status.NodeRef of machine %q is currently nil", machine.Name)
 			continue
 		}
+
 		if machine.Status.NodeRef.Kind != "Node" {
 			klog.Errorf("Status.NodeRef of machine %q does not reference a node (rather %q)", machine.Name, machine.Status.NodeRef.Kind)
 			continue
@@ -366,13 +363,12 @@ func (c *machineController) machineSetProviderIDs(machineSet *MachineSet) ([]str
 		}
 		if node != nil {
-			nodes = append(nodes, node.Spec.ProviderID)
+			providerIDs = append(providerIDs, node.Spec.ProviderID)
 		}
 	}
 
-	klog.V(4).Infof("nodegroup %s has nodes %v", machineSet.Name, nodes)
-
-	return nodes, nil
+	klog.V(4).Infof("nodegroup %s has nodes %v", machineSet.Name, providerIDs)
+	return providerIDs, nil
 }
 
 func (c *machineController) filterAllMachineSets(f machineSetFilterFunc) error {
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go
index e10845307217..100c1ff6bbf8 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machinedeployment.go
@@ -61,11 +61,11 @@ func (r machineDeploymentScalableResource) Nodes() ([]string, error) {
 
 	if err := r.controller.filterAllMachineSets(func(machineSet *MachineSet) error {
 		if machineSetIsOwnedByMachineDeployment(machineSet, r.machineDeployment) {
-			names, err := r.controller.machineSetNodeNames(machineSet)
+			providerIDs, err := r.controller.machineSetProviderIDs(machineSet)
 			if err != nil {
 				return err
 			}
-			result = append(result, names...)
+			result = append(result, providerIDs...)
 		}
 		return nil
 	}); err != nil {
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go
index 0cf2c1a7051d..98655bfaf3b5 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_machineset.go
@@ -57,7 +57,7 @@ func (r machineSetScalableResource) Namespace() string {
 }
 
 func (r machineSetScalableResource) Nodes() ([]string, error) {
-	return r.controller.machineSetNodeNames(r.machineSet)
+	return r.controller.machineSetProviderIDs(r.machineSet)
 }
 
 func (r machineSetScalableResource) Replicas() int32 {
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go
index f3df8359b3f9..d4c1580e3148 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go
@@ -179,6 +179,7 @@ func (ng *nodegroup) Debug() string {
 }
 
 // Nodes returns a list of all nodes that belong to this node group.
+// This includes instances that might not have become a Kubernetes node yet.
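+// An instance that exists at the cloud provider but has not (yet)
+// registered as a Kubernetes node is still reported; the cluster
+// state logic relies on this to detect unregistered nodes.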
 func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) {
 	nodes, err := ng.scalableResource.Nodes()
 	if err != nil {
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go
index 7ed410f1dd9d..5dfcaddd7a81 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go
@@ -401,10 +401,12 @@ func TestNodeGroupIncreaseSize(t *testing.T) {
 
 func TestNodeGroupDecreaseTargetSize(t *testing.T) {
 	type testCase struct {
-		description string
-		delta       int
-		initial     int32
-		expected    int32
+		description         string
+		delta               int
+		initial             int32
+		targetSizeIncrement int32
+		expected            int32
+		expectedError       bool
 	}
 
 	test := func(t *testing.T, tc *testCase, testConfig *testConfig) {
@@ -421,17 +423,49 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) {
 		}
 		ng := nodegroups[0]
 
-		currReplicas, err := ng.TargetSize()
-		if err := controller.machineSetInformer.Informer().GetStore().Add(newUnstructuredFromMachineSet(testConfig.machineSet)); err != nil {
-		}
-		if err := controller.nodeInformer.GetStore().Delete(testConfig.nodes[0]); err != nil {
-		if err := controller.machineDeploymentInformer.Informer().GetStore().Add(newUnstructuredFromMachineDeployment(testConfig.machineDeployment)); err != nil {
+		// DecreaseTargetSize should only decrease the size when the current target size of the nodeGroup
+		// is bigger than the number of existing instances for that group. We force such a scenario with targetSizeIncrement.
+		switch v := (ng.scalableResource).(type) {
+		case *machineSetScalableResource:
+			testConfig.machineSet.Spec.Replicas = int32ptr(*testConfig.machineSet.Spec.Replicas + tc.targetSizeIncrement)
+			ms, err := ng.machineapiClient.MachineSets(ng.Namespace()).Update(testConfig.machineSet)
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			if err := controller.machineSetInformer.Informer().GetStore().Add(ms); err != nil {
+				t.Fatalf("failed to add new machine: %v", err)
+			}
+		case *machineDeploymentScalableResource:
+			testConfig.machineDeployment.Spec.Replicas = int32ptr(*testConfig.machineDeployment.Spec.Replicas + tc.targetSizeIncrement)
+			md, err := ng.machineapiClient.MachineDeployments(ng.Namespace()).Update(testConfig.machineDeployment)
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+			if err := controller.machineDeploymentInformer.Informer().GetStore().Add(md); err != nil {
+				t.Fatalf("failed to add new machine: %v", err)
+			}
+		default:
+			t.Errorf("unexpected type: %T", v)
+		}
 
+		// A nodegroup is immutable; get a fresh copy after adding targetSizeIncrement.
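+		// (it wraps the copy of the scalable resource taken when it was
+		// built, so the replica bump above is not visible through it).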
+		nodegroups, err = controller.nodeGroups()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		ng = nodegroups[0]
 
-		if err := ng.DecreaseTargetSize(tc.delta); err != nil {
+		currReplicas, err := ng.TargetSize()
+		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
+		if currReplicas != int(tc.initial)+int(tc.targetSizeIncrement) {
+			t.Errorf("initially expected %v, got %v", tc.initial, currReplicas)
+		}
+
+		if err := ng.DecreaseTargetSize(tc.delta); (err != nil) != tc.expectedError {
+			t.Fatalf("expected error: %v, got: %v", tc.expectedError, err)
+		}
 
 		switch v := (ng.scalableResource).(type) {
 		case *machineSetScalableResource:
@@ -464,20 +498,35 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) {
 
 	t.Run("MachineSet", func(t *testing.T) {
 		tc := testCase{
-			description: "decrease by 1",
-			initial:     3,
-			expected:    2,
-			delta:       -1,
+			description:         "Same number of existing instances and node group target size should error",
+			initial:             3,
+			targetSizeIncrement: 0,
+			expected:            3,
+			delta:               -1,
+			expectedError:       true,
 		}
 		test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations))
 	})
 
+	t.Run("MachineSet", func(t *testing.T) {
+		tc := testCase{
+			description:         "A node group with target size 4 but only 3 existing instances should decrease by 1",
+			initial:             3,
+			targetSizeIncrement: 1,
+			expected:            3,
+			delta:               -1,
+		}
+		test(t, &tc, createMachineSetTestConfig(testNamespace, int(tc.initial), annotations))
+	})
 
 	t.Run("MachineDeployment", func(t *testing.T) {
 		tc := testCase{
-			description: "decrease by 1",
-			initial:     3,
-			expected:    2,
-			delta:       -1,
+			description:         "Same number of existing instances and node group target size should error",
+			initial:             3,
+			targetSizeIncrement: 0,
+			expected:            3,
+			delta:               -1,
+			expectedError:       true,
 		}
 		test(t, &tc, createMachineDeploymentTestConfig(testNamespace, int(tc.initial), annotations))
 	})
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go
index 3a4aa02161ce..6c47da3004db 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_scalableresource.go
@@ -34,7 +34,7 @@ type scalableResource interface {
 	// Namespace returns the namespace the resource is in
 	Namespace() string
 
-	// Nodes returns a list of all nodes that belong to this
+	// Nodes returns a list of all machines that have already become, or should become, nodes belonging to this
 	// resource
 	Nodes() ([]string, error)
 
From eae157910084754cf26d50e75f2718b8eb54ab9e Mon Sep 17 00:00:00 2001
From: Joel Speed
Date: Wed, 19 Feb 2020 10:57:07 +0000
Subject: [PATCH 06/10] Ensure DeleteNodes doesn't delete a node twice

---
 .../clusterapi/clusterapi_nodegroup.go        |  9 +++
 .../clusterapi/clusterapi_nodegroup_test.go   | 77 +++++++++++++++----
 2 files changed, 73 insertions(+), 13 deletions(-)

diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go
index d4c1580e3148..c5e5407fc25e 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go
@@ -89,6 +89,10 @@ func (ng *nodegroup) DeleteNodes(nodes []*corev1.Node) error {
 			return nil
 		}
 
+		if actualNodeGroup == nil {
+			return fmt.Errorf("no node group found for node %q", node.Spec.ProviderID)
+		}
+
 		if actualNodeGroup.Id() != ng.Id() {
			return fmt.Errorf("node %q doesn't belong to node group %q", node.Spec.ProviderID, ng.Id())
 		}
@@ -117,6 +121,11 @@ func (ng *nodegroup) DeleteNodes(nodes []*corev1.Node) error {
 
 		machine = machine.DeepCopy()
 
+		if !machine.GetDeletionTimestamp().IsZero() {
+			// The machine for this node is already being deleted
+			continue
+		}
+
 		if machine.Annotations == nil {
 			machine.Annotations = map[string]string{}
 		}
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go
index 5dfcaddd7a81..c7818497ff14 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go
@@ -22,6 +22,7 @@ import (
 	"sort"
 	"strings"
 	"testing"
+	"time"
 
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
@@ -428,22 +429,12 @@ func TestNodeGroupDecreaseTargetSize(t *testing.T) {
 		switch v := (ng.scalableResource).(type) {
 		case *machineSetScalableResource:
 			testConfig.machineSet.Spec.Replicas = int32ptr(*testConfig.machineSet.Spec.Replicas + tc.targetSizeIncrement)
-			ms, err := ng.machineapiClient.MachineSets(ng.Namespace()).Update(testConfig.machineSet)
-			if err != nil {
-				t.Fatalf("unexpected error: %v", err)
-			}
-
-			if err := controller.machineSetInformer.Informer().GetStore().Add(ms); err != nil {
+			if err := controller.machineSetInformer.Informer().GetStore().Add(newUnstructuredFromMachineSet(testConfig.machineSet)); err != nil {
 				t.Fatalf("failed to add new machine: %v", err)
 			}
 		case *machineDeploymentScalableResource:
 			testConfig.machineDeployment.Spec.Replicas = int32ptr(*testConfig.machineDeployment.Spec.Replicas + tc.targetSizeIncrement)
-			md, err := ng.machineapiClient.MachineDeployments(ng.Namespace()).Update(testConfig.machineDeployment)
-			if err != nil {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if err := controller.machineDeploymentInformer.Informer().GetStore().Add(md); err != nil {
+			if err := controller.machineDeploymentInformer.Informer().GetStore().Add(newUnstructuredFromMachineDeployment(testConfig.machineDeployment)); err != nil {
 				t.Fatalf("failed to add new machine: %v", err)
 			}
 		default:
 			t.Errorf("unexpected type: %T", v)
@@ -815,6 +806,7 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) {
 	})
 }
 
+func TestNodeGroupDeleteNodesTwice(t *testing.T) {
 	addDeletionTimestamp := func(t *testing.T, controller *machineController, machine *Machine) error {
 		// Simulate delete that would have happened if the
 		// Machine API controllers were running Don't actually
 		// delete since the fake client does not support
 		// finalizers.
 		now := v1.Now()
 		machine.DeletionTimestamp = &now
 		return controller.machineInformer.Informer().GetStore().Update(newUnstructuredFromMachine(machine))
 	}
 
+	test := func(t *testing.T, testConfig *testConfig) {
+		controller, stop := mustCreateTestController(t, testConfig)
+		defer stop()
+
+		nodegroups, err := controller.nodeGroups()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if l := len(nodegroups); l != 1 {
+			t.Fatalf("expected 1 nodegroup, got %d", l)
+		}
+
+		ng := nodegroups[0]
+		nodeNames, err := ng.Nodes()
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		if len(nodeNames) != len(testConfig.nodes) {
+			t.Fatalf("expected len=%v, got len=%v", len(testConfig.nodes), len(nodeNames))
+		}
+
+		sort.SliceStable(nodeNames, func(i, j int) bool {
+			return nodeNames[i].Id < nodeNames[j].Id
+		})
+
+		for i := 0; i < len(nodeNames); i++ {
+			if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID {
+				t.Fatalf("expected %q, got %q", testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id)
+			}
+		}
+
 		// Assert that we have no DeletionTimestamp
 		for i := 7; i < len(testConfig.machines); i++ {
 			if !testConfig.machines[i].ObjectMeta.DeletionTimestamp.IsZero() {
 				t.Fatalf("unexpected DeletionTimestamp")
 			}
 		}
+
 		if err := ng.DeleteNodes(testConfig.nodes[7:]); err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
+
 		for i := 7; i < len(testConfig.machines); i++ {
 			if err := addDeletionTimestamp(t, controller, testConfig.machines[i]); err != nil {
 				t.Fatalf("unexpected err: %v", err)
@@ -842,6 +869,7 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) {
 				t.Fatalf("expected a DeletionTimestamp")
 			}
 		}
+
 		// TODO(frobware) We have a flaky test here because we
 		// just called Delete and Update and the next call to
 		// controller.nodeGroups() will sometimes get stale
 		// objects.
 		//
 		// Running this test with a 500ms duration I see:
 		//
-		// $ ./stress ./openshiftmachineapi.test -test.run TestNodeGroupDeleteNodesTwice -test.count 5 | ts | ts -i
+		// $ ./stress ./clusterapi.test -test.run TestNodeGroupDeleteNodesTwice -test.count 5 | ts | ts -i
+		// 00:00:05 Feb 27 14:29:36 0 runs so far, 0 failures
 		// 00:00:05 Feb 27 14:29:41 8 runs so far, 0 failures
 		// 00:00:05 Feb 27 14:29:46 16 runs so far, 0 failures
@@ -876,6 +904,7 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) {
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
+
 		ng = nodegroups[0]
 
 		// Attempt to delete the nodes again which verifies
@@ -889,7 +918,29 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) {
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
+
 		expectedSize := len(testConfig.machines) - len(testConfig.machines[7:])
 		if actualSize != expectedSize {
 			t.Fatalf("expected %d nodes, got %d", expectedSize, actualSize)
 		}
+	}
+
+	// Note: 10 is an upper bound for the number of nodes/replicas.
+	// Going beyond 10 will break the sorting that happens in the
+	// test() function because sort.Strings() will not do natural
+	// sorting and the expected semantics in test() will fail.
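+	// For example, with 11 or more replicas the IDs would order as
+	// nodeid-0, nodeid-1, nodeid-10, nodeid-2, ..., which no longer
+	// matches the positional indexes in testConfig.nodes.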
+
+	t.Run("MachineSet", func(t *testing.T) {
+		test(t, createMachineSetTestConfig(testNamespace, 10, map[string]string{
+			nodeGroupMinSizeAnnotationKey: "1",
+			nodeGroupMaxSizeAnnotationKey: "10",
+		}))
+	})
+
+	t.Run("MachineDeployment", func(t *testing.T) {
+		test(t, createMachineDeploymentTestConfig(testNamespace, 10, map[string]string{
+			nodeGroupMinSizeAnnotationKey: "1",
+			nodeGroupMaxSizeAnnotationKey: "10",
+		}))
+	})
+}

From 7ba979866a257efa795c9fc0425f3d971d44b4da Mon Sep 17 00:00:00 2001
From: Enxebre
Date: Thu, 27 Feb 2020 15:12:30 +0100
Subject: [PATCH 07/10] Make machine API swappable as an env variable

---
 .../cloudprovider/clusterapi/clusterapi_controller.go | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
index 585a81476143..6195f3208cdd 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
@@ -38,6 +38,7 @@ import (
 const (
 	machineProviderIDIndex = "machineProviderIDIndex"
 	nodeProviderIDIndex    = "nodeProviderIDIndex"
+	defaultMachineAPI      = "v1alpha2.cluster.x-k8s.io"
 )
 
 // machineController watches for Nodes, Machines, MachineSets and
@@ -280,14 +281,15 @@ func newMachineController(
 	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeclient, 0)
 	informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynamicclient, 0, metav1.NamespaceAll, nil)
 
-	machineDeploymentResource, _ := schema.ParseResourceArg("machinedeployments.v1alpha2.cluster.x-k8s.io")
+	// TODO(alberto): let an environment variable override defaultMachineAPI
+	machineDeploymentResource, _ := schema.ParseResourceArg(fmt.Sprintf("machinedeployments.%v", defaultMachineAPI))
 
-	machineSetResource, _ := schema.ParseResourceArg("machinesets.v1alpha2.cluster.x-k8s.io")
+	machineSetResource, _ := schema.ParseResourceArg(fmt.Sprintf("machinesets.%v", defaultMachineAPI))
 	if machineSetResource == nil {
 		panic("MachineSetResource")
 	}
 
-	machineResource, _ := schema.ParseResourceArg("machines.v1alpha2.cluster.x-k8s.io")
+	machineResource, _ := schema.ParseResourceArg(fmt.Sprintf("machines.%v", defaultMachineAPI))
 	if machineResource == nil {
 		panic("machineResource")
 	}

From c5fa2b4cba4a55bc539f79526c8938177e5bd969 Mon Sep 17 00:00:00 2001
From: Andrew McDermott
Date: Wed, 4 Mar 2020 09:01:28 +0000
Subject: [PATCH 08/10] Update OWNERS

---
 cluster-autoscaler/cloudprovider/clusterapi/OWNERS | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/cluster-autoscaler/cloudprovider/clusterapi/OWNERS b/cluster-autoscaler/cloudprovider/clusterapi/OWNERS
index cdf20c0b1525..90085e6550b7 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/OWNERS
+++ b/cluster-autoscaler/cloudprovider/clusterapi/OWNERS
@@ -2,7 +2,13 @@ approvers:
 - frobware
 - enxebre
 - elmiko
+- hardikdr
+- detiber
+- ncdc
 reviewers:
 - frobware
 - enxebre
 - elmiko
+- hardikdr
+- detiber
+- ncdc

From d9e3197daabe833a1ffc066ca6e9fe5607a2eb43 Mon Sep 17 00:00:00 2001
From: Andrew McDermott
Date: Fri, 6 Mar 2020 10:09:29 +0000
Subject: [PATCH 09/10] Normalize providerID values

We index on providerID but it turns out that those values on node and
machine are not always consistent. Some encode region, some do not,
for example. This commit normalizes all values through
normalizedProviderString().
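
As a minimal illustrative sketch (the AWS-style value below is a
made-up example, not output from a real cluster), both spellings of
the same instance now index identically once reduced to the trailing
identifier:

  normalizedProviderString("aws:///us-east-1a/i-02a4cc8b6e9b0c51")  // "i-02a4cc8b6e9b0c51"
  normalizedProviderString("i-02a4cc8b6e9b0c51")                    // "i-02a4cc8b6e9b0c51"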
To ensure that we catch all places I've introduced a new type and
made the find() functions take this new type in lieu of a string.
Unit tests have also been adjusted to introduce a 'test:///' prefix
on the providerID value to further validate the change.

This change allows CAPI to work out-of-the-box, assuming v1alpha2.
It's also reasonable to assert that this consistency should be
enforced elsewhere; to make this behaviour easily revertable, I'm
leaving this as a separate commit in this patch series.
---
 .../clusterapi/clusterapi_controller.go       | 14 ++++-----
 .../clusterapi/clusterapi_controller_test.go  | 16 +++++-----
 .../clusterapi/clusterapi_nodegroup.go        |  4 +--
 .../clusterapi/clusterapi_nodegroup_test.go   |  9 +++---
 .../clusterapi/clusterapi_utils.go            | 10 ++++++
 .../clusterapi/clusterapi_utils_test.go       | 31 +++++++++++++++++++
 6 files changed, 62 insertions(+), 22 deletions(-)

diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
index 6195f3208cdd..d98ebebbb342 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller.go
@@ -74,13 +74,13 @@ func indexMachineByProviderID(obj interface{}) ([]string, error) {
 		return nil, nil
 	}
 
-	return []string{providerID}, nil
+	return []string{string(normalizedProviderString(providerID))}, nil
 }
 
 func indexNodeByProviderID(obj interface{}) ([]string, error) {
 	if node, ok := obj.(*corev1.Node); ok {
 		if node.Spec.ProviderID != "" {
-			return []string{node.Spec.ProviderID}, nil
+			return []string{string(normalizedProviderString(node.Spec.ProviderID))}, nil
 		}
 		return []string{}, nil
 	}
@@ -192,8 +192,8 @@ func (c *machineController) run(stopCh <-chan struct{}) error {
 
 // findMachineByProviderID finds machine matching providerID. A
 // DeepCopy() of the object is returned on success.
-func (c *machineController) findMachineByProviderID(providerID string) (*Machine, error) {
-	objs, err := c.machineInformer.Informer().GetIndexer().ByIndex(machineProviderIDIndex, providerID)
+func (c *machineController) findMachineByProviderID(providerID normalizedProviderID) (*Machine, error) {
+	objs, err := c.machineInformer.Informer().GetIndexer().ByIndex(machineProviderIDIndex, string(providerID))
 	if err != nil {
 		return nil, err
 	}
@@ -448,7 +448,7 @@ func (c *machineController) nodeGroups() ([]*nodegroup, error) {
 }
 
 func (c *machineController) nodeGroupForNode(node *corev1.Node) (*nodegroup, error) {
-	machine, err := c.findMachineByProviderID(node.Spec.ProviderID)
+	machine, err := c.findMachineByProviderID(normalizedProviderString(node.Spec.ProviderID))
 	if err != nil {
 		return nil, err
 	}
@@ -505,8 +505,8 @@ func (c *machineController) nodeGroupForNode(node *corev1.Node) (*nodegroup, err
 // findNodeByProviderID find the Node object keyed by provideID.
 // Returns nil if it cannot be found. A DeepCopy() of the object is
 // returned on success.
-func (c *machineController) findNodeByProviderID(providerID string) (*corev1.Node, error) { - objs, err := c.nodeInformer.GetIndexer().ByIndex(nodeProviderIDIndex, providerID) +func (c *machineController) findNodeByProviderID(providerID normalizedProviderID) (*corev1.Node, error) { + objs, err := c.nodeInformer.GetIndexer().ByIndex(nodeProviderIDIndex, string(providerID)) if err != nil { return nil, err } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go index 646b18174ba0..29a85682991d 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_controller_test.go @@ -205,7 +205,7 @@ func makeLinkedNodeAndMachine(i int, namespace string, owner v1.OwnerReference) }, }, Spec: corev1.NodeSpec{ - ProviderID: fmt.Sprintf("%s-%s-nodeid-%d", namespace, owner.Name, i), + ProviderID: fmt.Sprintf("test:////%s-%s-nodeid-%d", namespace, owner.Name, i), }, } @@ -224,7 +224,7 @@ func makeLinkedNodeAndMachine(i int, namespace string, owner v1.OwnerReference) }}, }, Spec: MachineSpec{ - ProviderID: pointer.StringPtr(fmt.Sprintf("%s-%s-nodeid-%d", namespace, owner.Name, i)), + ProviderID: pointer.StringPtr(fmt.Sprintf("test:////%s-%s-nodeid-%d", namespace, owner.Name, i)), }, Status: MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -421,7 +421,7 @@ func TestControllerFindMachineByProviderID(t *testing.T) { } // Test #1: Verify underlying machine provider ID matches - machine, err := controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + machine, err := controller.findMachineByProviderID(normalizedProviderString(testConfig.nodes[0].Spec.ProviderID)) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -440,7 +440,7 @@ func TestControllerFindMachineByProviderID(t *testing.T) { if err := controller.machineInformer.Informer().GetStore().Update(machine); err != nil { t.Fatalf("unexpected error updating machine, got %v", err) } - machine, err = controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + machine, err = controller.findMachineByProviderID(normalizedProviderString(testConfig.nodes[0].Spec.ProviderID)) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -824,7 +824,7 @@ func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { } // Test #1: Verify machine can be found from node annotation - machine, err := controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + machine, err := controller.findMachineByProviderID(normalizedProviderString(testConfig.nodes[0].Spec.ProviderID)) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -842,7 +842,7 @@ func TestControllerFindMachineFromNodeAnnotation(t *testing.T) { if err := controller.nodeInformer.GetStore().Update(node); err != nil { t.Fatalf("unexpected error updating node, got %v", err) } - machine, err = controller.findMachineByProviderID(testConfig.nodes[0].Spec.ProviderID) + machine, err = controller.findMachineByProviderID(normalizedProviderString(testConfig.nodes[0].Spec.ProviderID)) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -938,7 +938,7 @@ func TestControllerMachineSetNodeNamesUsingProviderID(t *testing.T) { }) for i := range testConfig.nodes { - if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID { + if nodeNames[i].Id != string(normalizedProviderString(testConfig.nodes[i].Spec.ProviderID)) { t.Fatalf("expected %q, got %q", 
testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id) } } @@ -986,7 +986,7 @@ func TestControllerMachineSetNodeNamesUsingStatusNodeRefName(t *testing.T) { }) for i := range testConfig.nodes { - if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID { + if nodeNames[i].Id != string(normalizedProviderString(testConfig.nodes[i].Spec.ProviderID)) { t.Fatalf("expected %q, got %q", testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id) } } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go index c5e5407fc25e..89731eee11ed 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup.go @@ -111,7 +111,7 @@ func (ng *nodegroup) DeleteNodes(nodes []*corev1.Node) error { // suitable candidate for deletion and drop the replica count // by 1. Fail fast on any error. for _, node := range nodes { - machine, err := ng.machineController.findMachineByProviderID(node.Spec.ProviderID) + machine, err := ng.machineController.findMachineByProviderID(normalizedProviderString(node.Spec.ProviderID)) if err != nil { return err } @@ -198,7 +198,7 @@ func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) { instances := make([]cloudprovider.Instance, len(nodes)) for i := range nodes { instances[i] = cloudprovider.Instance{ - Id: nodes[i], + Id: string(normalizedProviderString(nodes[i])), } } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go index c7818497ff14..51045309bc10 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_nodegroup_test.go @@ -659,7 +659,7 @@ func TestNodeGroupDeleteNodes(t *testing.T) { }) for i := 0; i < len(nodeNames); i++ { - if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID { + if nodeNames[i].Id != string(normalizedProviderString(testConfig.nodes[i].Spec.ProviderID)) { t.Fatalf("expected %q, got %q", testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id) } } @@ -722,7 +722,6 @@ func TestNodeGroupDeleteNodes(t *testing.T) { func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { test := func(t *testing.T, expected int, testConfigs []*testConfig) { - t.Helper() testConfig0, testConfig1 := testConfigs[0], testConfigs[1] controller, stop := mustCreateTestController(t, testConfigs...) 
defer stop() @@ -756,7 +755,7 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { expectedErr0 = `node "test-namespace1-machineset-0-nodeid-0" doesn't belong to node group "test-namespace0/machinedeployment-0"` } - if !strings.Contains(err0.Error(), expectedErr0) { + if !strings.Contains(err0.Error(), string(normalizedProviderString(expectedErr0))) { t.Errorf("expected: %q, got: %q", expectedErr0, err0.Error()) } @@ -771,7 +770,7 @@ func TestNodeGroupMachineSetDeleteNodesWithMismatchedNodes(t *testing.T) { expectedErr1 = `node "test-namespace0-machineset-0-nodeid-0" doesn't belong to node group "test-namespace1/machinedeployment-0"` } - if !strings.Contains(err1.Error(), expectedErr1) { + if !strings.Contains(err1.Error(), string(normalizedProviderString(expectedErr1))) { t.Errorf("expected: %q, got: %q", expectedErr1, err1.Error()) } @@ -845,7 +844,7 @@ func TestNodeGroupDeleteNodesTwice(t *testing.T) { }) for i := 0; i < len(nodeNames); i++ { - if nodeNames[i].Id != testConfig.nodes[i].Spec.ProviderID { + if nodeNames[i].Id != string(normalizedProviderString(testConfig.nodes[i].Spec.ProviderID)) { t.Fatalf("expected %q, got %q", testConfig.nodes[i].Spec.ProviderID, nodeNames[i].Id) } } diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go index 9f4246c36914..0e0f00151b0c 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils.go @@ -18,6 +18,7 @@ package clusterapi import ( "strconv" + "strings" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,6 +49,8 @@ var ( errInvalidMaxAnnotation = errors.New("invalid max annotation") ) +type normalizedProviderID string + // minSize returns the minimum value encoded in the annotations keyed // by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation // if the annotation doesn't exist or errInvalidMinAnnotation if the @@ -143,3 +146,10 @@ func machineSetIsOwnedByMachineDeployment(machineSet *MachineSet, machineDeploym } return false } + +// normalizedProviderString splits s on '/' returning everything after +// the last '/'. 
+func normalizedProviderString(s string) normalizedProviderID { + split := strings.Split(s, "/") + return normalizedProviderID(split[len(split)-1]) +} diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go index 2aefc435d2e9..41e852ab75fb 100644 --- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go +++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_utils_test.go @@ -369,3 +369,34 @@ func TestUtilMachineSetMachineDeploymentOwnerRef(t *testing.T) { }) } } + +func TestUtilNormalizedProviderID(t *testing.T) { + for _, tc := range []struct { + description string + providerID string + expectedID normalizedProviderID + }{{ + description: "nil string yields empty string", + providerID: "", + expectedID: "", + }, { + description: "empty string", + providerID: "", + expectedID: "", + }, { + description: "id without / characters", + providerID: "i-12345678", + expectedID: "i-12345678", + }, { + description: "id with / characters", + providerID: "aws:////i-12345678", + expectedID: "i-12345678", + }} { + t.Run(tc.description, func(t *testing.T) { + actualID := normalizedProviderString(tc.providerID) + if actualID != tc.expectedID { + t.Errorf("expected %v, got %v", tc.expectedID, actualID) + } + }) + } +} From 3955223eeeeea11089cc15779fcf1b4c5d8b54e9 Mon Sep 17 00:00:00 2001 From: Andrew McDermott Date: Tue, 10 Mar 2020 10:54:21 +0000 Subject: [PATCH 10/10] Updating vendor against git@github.com:kubernetes/kubernetes.git:f8ff8f44206ff4dd9b58386d96462b01a3d79fb1 (f8ff8f44206ff4dd9b58386d96462b01a3d79fb1) --- cluster-autoscaler/go.mod | 50 +-- .../client-go/dynamic/dynamicinformer/BUILD | 53 +++ .../dynamic/dynamicinformer/informer.go | 158 ++++++++ .../dynamic/dynamicinformer/interface.go | 34 ++ .../client-go/dynamic/dynamiclister/BUILD | 49 +++ .../dynamic/dynamiclister/interface.go | 40 ++ .../client-go/dynamic/dynamiclister/lister.go | 91 +++++ .../client-go/dynamic/dynamiclister/shim.go | 87 ++++ .../k8s.io/client-go/dynamic/fake/BUILD | 55 +++ .../k8s.io/client-go/dynamic/fake/simple.go | 371 ++++++++++++++++++ cluster-autoscaler/vendor/modules.txt | 31 +- 11 files changed, 981 insertions(+), 38 deletions(-) create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/interface.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/interface.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/lister.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/shim.go create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/BUILD create mode 100644 cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/simple.go diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index a8c249edd4e0..165967cca734 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -18,6 +18,7 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af github.com/json-iterator/go v1.1.8 + github.com/pkg/errors v0.8.1 github.com/satori/go.uuid v1.2.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.4.0 
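For reference, here is a minimal, self-contained sketch of how the
providerID normalization introduced in patch 09 behaves. It duplicates
the unexported normalizedProviderString/normalizedProviderID pair from
clusterapi_utils.go so it can run standalone; the example IDs are
illustrative:

package main

import (
	"fmt"
	"strings"
)

// normalizedProviderID mirrors the unexported type added in
// clusterapi_utils.go.
type normalizedProviderID string

// normalizedProviderString splits s on '/' and keeps everything after
// the last '/', so a bare machine ID and a scheme-prefixed providerID
// normalize to the same index key.
func normalizedProviderString(s string) normalizedProviderID {
	split := strings.Split(s, "/")
	return normalizedProviderID(split[len(split)-1])
}

func main() {
	for _, id := range []string{"i-12345678", "aws:////i-12345678", "test:////ns-owner-nodeid-0"} {
		fmt.Printf("%q -> %q\n", id, normalizedProviderString(id))
	}
	// Prints:
	// "i-12345678" -> "i-12345678"
	// "aws:////i-12345678" -> "i-12345678"
	// "test:////ns-owner-nodeid-0" -> "ns-owner-nodeid-0"
}

This is why both the machine and node informers index by the normalized
form: lookups succeed whether or not the stored Spec.ProviderID carries
a scheme prefix.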
@@ -35,6 +36,7 @@ require ( k8s.io/klog v1.0.0 k8s.io/kubernetes v0.0.0 k8s.io/legacy-cloud-providers v0.0.0 + k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab ) replace ( @@ -402,34 +404,34 @@ replace ( gotest.tools/gotestsum => gotest.tools/gotestsum v0.3.5 grpc.go4.org => grpc.go4.org v0.0.0-20170609214715-11d0a25b4919 honnef.co/go/tools => honnef.co/go/tools v0.0.1-2019.2.2 - k8s.io/api => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/api - k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/apiextensions-apiserver - k8s.io/apimachinery => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/apimachinery - k8s.io/apiserver => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/apiserver - k8s.io/cli-runtime => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/cli-runtime - k8s.io/client-go => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/client-go - k8s.io/cloud-provider => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/cloud-provider - k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/cluster-bootstrap - k8s.io/code-generator => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/code-generator - k8s.io/component-base => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/component-base - k8s.io/cri-api => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/cri-api - k8s.io/csi-translation-lib => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/csi-translation-lib + k8s.io/api => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/api + k8s.io/apiextensions-apiserver => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/apiextensions-apiserver + k8s.io/apimachinery => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/apimachinery + k8s.io/apiserver => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/apiserver + k8s.io/cli-runtime => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/cli-runtime + k8s.io/client-go => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/client-go + k8s.io/cloud-provider => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/cloud-provider + k8s.io/cluster-bootstrap => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/cluster-bootstrap + k8s.io/code-generator => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/code-generator + k8s.io/component-base => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/component-base + k8s.io/cri-api => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/cri-api + k8s.io/csi-translation-lib => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/csi-translation-lib k8s.io/gengo => k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1 k8s.io/klog => k8s.io/klog v1.0.0 - k8s.io/kube-aggregator => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kube-aggregator - k8s.io/kube-controller-manager => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kube-controller-manager + k8s.io/kube-aggregator => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kube-aggregator + k8s.io/kube-controller-manager => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kube-controller-manager k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c - k8s.io/kube-proxy => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kube-proxy - k8s.io/kube-scheduler => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kube-scheduler - 
k8s.io/kubectl => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kubectl - k8s.io/kubelet => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kubelet - k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/legacy-cloud-providers - k8s.io/metrics => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/metrics + k8s.io/kube-proxy => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kube-proxy + k8s.io/kube-scheduler => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kube-scheduler + k8s.io/kubectl => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kubectl + k8s.io/kubelet => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kubelet + k8s.io/legacy-cloud-providers => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/legacy-cloud-providers + k8s.io/metrics => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/metrics k8s.io/repo-infra => k8s.io/repo-infra v0.0.1-alpha.1 - k8s.io/sample-apiserver => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/sample-apiserver - k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/sample-cli-plugin - k8s.io/sample-controller => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/sample-controller + k8s.io/sample-apiserver => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/sample-apiserver + k8s.io/sample-cli-plugin => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/sample-cli-plugin + k8s.io/sample-controller => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/sample-controller k8s.io/system-validators => k8s.io/system-validators v1.0.4 k8s.io/utils => k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab modernc.org/cc => modernc.org/cc v1.0.0 @@ -453,4 +455,4 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -replace k8s.io/kubernetes => /tmp/ca-update-vendor.ou1l/kubernetes +replace k8s.io/kubernetes => /tmp/ca-update-vendor.Hz3P/kubernetes diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/BUILD new file mode 100644 index 000000000000..0a708d17a9a6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/BUILD @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "informer.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/dynamic/dynamicinformer", + importpath = "k8s.io/client-go/dynamic/dynamicinformer", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/dynamic:go_default_library", + "//staging/src/k8s.io/client-go/dynamic/dynamiclister:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["informer_test.go"], + embed = [":go_default_library"], + deps = [ + 
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/client-go/dynamic/fake:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go new file mode 100644 index 000000000000..40878b400f6a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go @@ -0,0 +1,158 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamicinformer + +import ( + "context" + "sync" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamiclister" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" +) + +// NewDynamicSharedInformerFactory constructs a new instance of dynamicSharedInformerFactory for all namespaces. +func NewDynamicSharedInformerFactory(client dynamic.Interface, defaultResync time.Duration) DynamicSharedInformerFactory { + return NewFilteredDynamicSharedInformerFactory(client, defaultResync, metav1.NamespaceAll, nil) +} + +// NewFilteredDynamicSharedInformerFactory constructs a new instance of dynamicSharedInformerFactory. +// Listers obtained via this factory will be subject to the same filters as specified here. +func NewFilteredDynamicSharedInformerFactory(client dynamic.Interface, defaultResync time.Duration, namespace string, tweakListOptions TweakListOptionsFunc) DynamicSharedInformerFactory { + return &dynamicSharedInformerFactory{ + client: client, + defaultResync: defaultResync, + namespace: namespace, + informers: map[schema.GroupVersionResource]informers.GenericInformer{}, + startedInformers: make(map[schema.GroupVersionResource]bool), + tweakListOptions: tweakListOptions, + } +} + +type dynamicSharedInformerFactory struct { + client dynamic.Interface + defaultResync time.Duration + namespace string + + lock sync.Mutex + informers map[schema.GroupVersionResource]informers.GenericInformer + // startedInformers is used for tracking which informers have been started. 
+ // This allows Start() to be called multiple times safely. + startedInformers map[schema.GroupVersionResource]bool + tweakListOptions TweakListOptionsFunc +} + +var _ DynamicSharedInformerFactory = &dynamicSharedInformerFactory{} + +func (f *dynamicSharedInformerFactory) ForResource(gvr schema.GroupVersionResource) informers.GenericInformer { + f.lock.Lock() + defer f.lock.Unlock() + + key := gvr + informer, exists := f.informers[key] + if exists { + return informer + } + + informer = NewFilteredDynamicInformer(f.client, gvr, f.namespace, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) + f.informers[key] = informer + + return informer +} + +// Start initializes all requested informers. +func (f *dynamicSharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Informer().Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *dynamicSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool { + informers := func() map[schema.GroupVersionResource]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[schema.GroupVersionResource]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer.Informer() + } + } + return informers + }() + + res := map[schema.GroupVersionResource]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// NewFilteredDynamicInformer constructs a new informer for a dynamic type. +func NewFilteredDynamicInformer(client dynamic.Interface, gvr schema.GroupVersionResource, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions TweakListOptionsFunc) informers.GenericInformer { + return &dynamicInformer{ + gvr: gvr, + informer: cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).Watch(context.TODO(), options) + }, + }, + &unstructured.Unstructured{}, + resyncPeriod, + indexers, + ), + } +} + +type dynamicInformer struct { + informer cache.SharedIndexInformer + gvr schema.GroupVersionResource +} + +var _ informers.GenericInformer = &dynamicInformer{} + +func (d *dynamicInformer) Informer() cache.SharedIndexInformer { + return d.informer +} + +func (d *dynamicInformer) Lister() cache.GenericLister { + return dynamiclister.NewRuntimeObjectShim(dynamiclister.New(d.informer.GetIndexer(), d.gvr)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/interface.go new file mode 100644 index 000000000000..083977c301bb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamicinformer/interface.go @@ -0,0 +1,34 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamicinformer + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" +) + +// DynamicSharedInformerFactory provides access to a shared informer and lister for dynamic client +type DynamicSharedInformerFactory interface { + Start(stopCh <-chan struct{}) + ForResource(gvr schema.GroupVersionResource) informers.GenericInformer + WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool +} + +// TweakListOptionsFunc defines the signature of a helper function +// that wants to provide more listing options to API +type TweakListOptionsFunc func(*metav1.ListOptions) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/BUILD new file mode 100644 index 000000000000..c1bb09e9b7c6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/BUILD @@ -0,0 +1,49 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "interface.go", + "lister.go", + "shim.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/dynamic/dynamiclister", + importpath = "k8s.io/client-go/dynamic/dynamiclister", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["lister_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/interface.go new file mode 100644 index 000000000000..c39cbee925b6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/interface.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiclister + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" +) + +// Lister helps list resources. +type Lister interface { + // List lists all resources in the indexer. + List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) + // Get retrieves a resource from the indexer with the given name + Get(name string) (*unstructured.Unstructured, error) + // Namespace returns an object that can list and get resources in a given namespace. + Namespace(namespace string) NamespaceLister +} + +// NamespaceLister helps list and get resources. +type NamespaceLister interface { + // List lists all resources in the indexer for a given namespace. + List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) + // Get retrieves a resource from the indexer for a given namespace and name. + Get(name string) (*unstructured.Unstructured, error) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/lister.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/lister.go new file mode 100644 index 000000000000..a50fc471e900 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/lister.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiclister + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +var _ Lister = &dynamicLister{} +var _ NamespaceLister = &dynamicNamespaceLister{} + +// dynamicLister implements the Lister interface. +type dynamicLister struct { + indexer cache.Indexer + gvr schema.GroupVersionResource +} + +// New returns a new Lister. +func New(indexer cache.Indexer, gvr schema.GroupVersionResource) Lister { + return &dynamicLister{indexer: indexer, gvr: gvr} +} + +// List lists all resources in the indexer. 
+func (l *dynamicLister) List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) { + err = cache.ListAll(l.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*unstructured.Unstructured)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer with the given name +func (l *dynamicLister) Get(name string) (*unstructured.Unstructured, error) { + obj, exists, err := l.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*unstructured.Unstructured), nil +} + +// Namespace returns an object that can list and get resources from a given namespace. +func (l *dynamicLister) Namespace(namespace string) NamespaceLister { + return &dynamicNamespaceLister{indexer: l.indexer, namespace: namespace, gvr: l.gvr} +} + +// dynamicNamespaceLister implements the NamespaceLister interface. +type dynamicNamespaceLister struct { + indexer cache.Indexer + namespace string + gvr schema.GroupVersionResource +} + +// List lists all resources in the indexer for a given namespace. +func (l *dynamicNamespaceLister) List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) { + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*unstructured.Unstructured)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer for a given namespace and name. +func (l *dynamicNamespaceLister) Get(name string) (*unstructured.Unstructured, error) { + obj, exists, err := l.indexer.GetByKey(l.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*unstructured.Unstructured), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/shim.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/shim.go new file mode 100644 index 000000000000..92a5f54af972 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/dynamiclister/shim.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiclister + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +var _ cache.GenericLister = &dynamicListerShim{} +var _ cache.GenericNamespaceLister = &dynamicNamespaceListerShim{} + +// dynamicListerShim implements the cache.GenericLister interface. +type dynamicListerShim struct { + lister Lister +} + +// NewRuntimeObjectShim returns a new shim for Lister. 
+// It wraps Lister so that it implements cache.GenericLister interface +func NewRuntimeObjectShim(lister Lister) cache.GenericLister { + return &dynamicListerShim{lister: lister} +} + +// List will return all objects across namespaces +func (s *dynamicListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := s.lister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve assuming that name==key +func (s *dynamicListerShim) Get(name string) (runtime.Object, error) { + return s.lister.Get(name) +} + +func (s *dynamicListerShim) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &dynamicNamespaceListerShim{ + namespaceLister: s.lister.Namespace(namespace), + } +} + +// dynamicNamespaceListerShim implements the NamespaceLister interface. +// It wraps NamespaceLister so that it implements cache.GenericNamespaceLister interface +type dynamicNamespaceListerShim struct { + namespaceLister NamespaceLister +} + +// List will return all objects in this namespace +func (ns *dynamicNamespaceListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := ns.namespaceLister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve by namespace and name +func (ns *dynamicNamespaceListerShim) Get(name string) (runtime.Object, error) { + return ns.namespaceLister.Get(name) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/BUILD new file mode 100644 index 000000000000..1529f3235fa8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/BUILD @@ -0,0 +1,55 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["simple.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/dynamic/fake", + importpath = "k8s.io/client-go/dynamic/fake", + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/dynamic:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["simple_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/simple.go b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/simple.go new file mode 100644 index 000000000000..b2c5f6f34360 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/fake/simple.go @@ -0,0 +1,371 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/testing" +) + +func NewSimpleDynamicClient(scheme *runtime.Scheme, objects ...runtime.Object) *FakeDynamicClient { + // In order to use List with this client, you have to have the v1.List registered in your scheme. Neat thing though + // it does NOT have to be the *same* list + scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "fake-dynamic-client-group", Version: "v1", Kind: "List"}, &unstructured.UnstructuredList{}) + + codecs := serializer.NewCodecFactory(scheme) + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &FakeDynamicClient{scheme: scheme} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type FakeDynamicClient struct { + testing.Fake + scheme *runtime.Scheme +} + +type dynamicResourceClient struct { + client *FakeDynamicClient + namespace string + resource schema.GroupVersionResource +} + +var _ dynamic.Interface = &FakeDynamicClient{} + +func (c *FakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { + return &dynamicResourceClient{client: c, resource: resource} +} + +func (c *dynamicResourceClient) Namespace(ns string) dynamic.ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootCreateAction(c.resource, obj), obj) + + case len(c.namespace) == 0 && len(subresources) > 0: + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + name := accessor.GetName() + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), obj), obj) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewCreateAction(c.resource, c.namespace, obj), obj) + + case len(c.namespace) > 0 && len(subresources) > 0: + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + name := accessor.GetName() + uncastRet, err = c.client.Fake. + Invokes(testing.NewCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateAction(c.resource, obj), obj) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), obj), obj) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateAction(c.resource, c.namespace, obj), obj) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(c.resource, "status", obj), obj) + + case len(c.namespace) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateSubresourceAction(c.resource, "status", c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + _, err = c.client.Fake. + Invokes(testing.NewRootDeleteAction(c.resource, name), &metav1.Status{Status: "dynamic delete fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + _, err = c.client.Fake. + Invokes(testing.NewRootDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic delete fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + _, err = c.client.Fake. + Invokes(testing.NewDeleteAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic delete fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + _, err = c.client.Fake. + Invokes(testing.NewDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, name), &metav1.Status{Status: "dynamic delete fail"}) + } + + return err +} + +func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var err error + switch { + case len(c.namespace) == 0: + action := testing.NewRootDeleteCollectionAction(c.resource, listOptions) + _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"}) + + case len(c.namespace) > 0: + action := testing.NewDeleteCollectionAction(c.resource, c.namespace, listOptions) + _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"}) + + } + + return err +} + +func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootGetAction(c.resource, name), &metav1.Status{Status: "dynamic get fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootGetSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewGetAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic get fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewGetSubresourceAction(c.resource, c.namespace, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"}) + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} + +func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + var obj runtime.Object + var err error + switch { + case len(c.namespace) == 0: + obj, err = c.client.Fake. + Invokes(testing.NewRootListAction(c.resource, schema.GroupVersionKind{Group: "fake-dynamic-client-group", Version: "v1", Kind: "" /*List is appended by the tracker automatically*/}, opts), &metav1.Status{Status: "dynamic list fail"}) + + case len(c.namespace) > 0: + obj, err = c.client.Fake. + Invokes(testing.NewListAction(c.resource, schema.GroupVersionKind{Group: "fake-dynamic-client-group", Version: "v1", Kind: "" /*List is appended by the tracker automatically*/}, c.namespace, opts), &metav1.Status{Status: "dynamic list fail"}) + + } + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + + retUnstructured := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(obj, retUnstructured, nil); err != nil { + return nil, err + } + entireList, err := retUnstructured.ToList() + if err != nil { + return nil, err + } + + list := &unstructured.UnstructuredList{} + list.SetResourceVersion(entireList.GetResourceVersion()) + for i := range entireList.Items { + item := &entireList.Items[i] + metadata, err := meta.Accessor(item) + if err != nil { + return nil, err + } + if label.Matches(labels.Set(metadata.GetLabels())) { + list.Items = append(list.Items, *item) + } + } + return list, nil +} + +func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + switch { + case len(c.namespace) == 0: + return c.client.Fake. + InvokesWatch(testing.NewRootWatchAction(c.resource, opts)) + + case len(c.namespace) > 0: + return c.client.Fake. + InvokesWatch(testing.NewWatchAction(c.resource, c.namespace, opts)) + + } + + panic("math broke") +} + +// TODO: opts are currently ignored. +func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootPatchAction(c.resource, name, pt, data), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootPatchSubresourceAction(c.resource, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewPatchAction(c.resource, c.namespace, name, pt, data), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(c.resource, c.namespace, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"}) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, err +} diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index 41bc6af99340..f7e030bfba24 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -687,7 +687,7 @@ gopkg.in/square/go-jose.v2/jwt gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.2.8 => gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/api +# k8s.io/api v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/api k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -730,7 +730,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/apimachinery +# k8s.io/apimachinery v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/apimachinery k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -788,7 +788,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/apiserver +# k8s.io/apiserver v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/apiserver k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer @@ -885,10 +885,13 @@ k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/wsstream k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/client-go +# k8s.io/client-go v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/client-go k8s.io/client-go/discovery k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic +k8s.io/client-go/dynamic/dynamicinformer +k8s.io/client-go/dynamic/dynamiclister +k8s.io/client-go/dynamic/fake k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1 @@ -1112,7 +1115,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/cloud-provider +# k8s.io/cloud-provider v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/cloud-provider k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/node/helpers @@ -1120,7 +1123,7 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/component-base +# k8s.io/component-base v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/component-base k8s.io/component-base/cli/flag 
k8s.io/component-base/codec k8s.io/component-base/config @@ -1133,10 +1136,10 @@ k8s.io/component-base/metrics/prometheus/restclient k8s.io/component-base/metrics/testutil k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/cri-api +# k8s.io/cri-api v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/cri-api k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/csi-translation-lib +# k8s.io/csi-translation-lib v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/csi-translation-lib k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins # k8s.io/klog v1.0.0 => k8s.io/klog v1.0.0 @@ -1148,17 +1151,17 @@ k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto -# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kube-proxy +# k8s.io/kube-proxy v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kube-proxy k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kube-scheduler +# k8s.io/kube-scheduler v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kube-scheduler k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kubectl +# k8s.io/kubectl v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kubectl k8s.io/kubectl/pkg/scale -# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/kubelet +# k8s.io/kubelet v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/kubelet k8s.io/kubelet/config/v1beta1 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 k8s.io/kubelet/pkg/apis/pluginregistration/v1 -# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes +# k8s.io/kubernetes v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes k8s.io/kubernetes/cmd/kube-proxy/app k8s.io/kubernetes/cmd/kubelet/app k8s.io/kubernetes/cmd/kubelet/app/options @@ -1447,7 +1450,7 @@ k8s.io/kubernetes/pkg/volume/vsphere_volume k8s.io/kubernetes/pkg/windows/service k8s.io/kubernetes/test/utils k8s.io/kubernetes/third_party/forked/golang/expansion -# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.ou1l/kubernetes/staging/src/k8s.io/legacy-cloud-providers +# k8s.io/legacy-cloud-providers v0.0.0 => /tmp/ca-update-vendor.Hz3P/kubernetes/staging/src/k8s.io/legacy-cloud-providers k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure/auth
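To close out, a short sketch of how the newly vendored dynamicinformer
machinery is typically driven. This is not code from this series: the
kubeconfig path and the cluster-api GroupVersionResource below are
illustrative assumptions, but the factory calls
(NewDynamicSharedInformerFactory, ForResource, Start, WaitForCacheSync)
match the vendored package above:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative: build client config from a kubeconfig path; the
	// autoscaler wires this through its own options instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// One shared factory for all namespaces; per-GVR informers are
	// created lazily by ForResource and started by Start.
	factory := dynamicinformer.NewDynamicSharedInformerFactory(client, 10*time.Minute)

	// Hypothetical GVR for cluster-api v1alpha2 MachineSets.
	gvr := schema.GroupVersionResource{Group: "cluster.x-k8s.io", Version: "v1alpha2", Resource: "machinesets"}
	informer := factory.ForResource(gvr)

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	if synced := factory.WaitForCacheSync(stopCh); !synced[gvr] {
		panic("MachineSet cache failed to sync")
	}

	// The lister serves reads from the informer's local cache.
	objs, err := informer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("observed %d MachineSet objects\n", len(objs))
}

In unit tests, the vendored fake package's NewSimpleDynamicClient(scheme,
objs...) can stand in for dynamic.NewForConfig, so the same factory code
runs against an in-memory object tracker.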