From 63d09c6ed591da5e62d3240c2b5f70ae6c5f27d7 Mon Sep 17 00:00:00 2001 From: Qing Hao Date: Fri, 24 Nov 2023 17:36:03 +0800 Subject: [PATCH] Update rollout lib (#276) (#298) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: run `make update` fail because deepcopy doesn't support generic type. (#288) Fix verify ci step missing. (#289) :bug: Use controller-runtime for deepcopy generation for cluster:v1alpha1 (#291) * Revert "Fix: run `make update` fail because deepcopy doesn't support generic type. (#288)" This reverts commit ae208c861684c3f7cc7d46471ebf2bf1c7f85ae1. * Use `controller-gen` for deepcopy cluster:v1alpha1 GenGo isn't respecting the `+k8s:deepcopy-gen=false` tag to skip generation for the generic type --------- 🐛 add ca bundle to addon proxy settings (#293) Revert "remove ClusterSet ClusterSetBinding API version v1beta1 (#266)" This reverts commit 9675ab737d2086dd7094fc962c9617cc71ec7484. Signed-off-by: haoqing0110 Co-authored-by: Mohamed ElSerngawy --- Makefile | 9 +- ...agement.io_addondeploymentconfigs.crd.yaml | 6 + ...ster-management.io_addontemplates.crd.yaml | 1 + addon/v1alpha1/types_addondeploymentconfig.go | 5 + addon/v1alpha1/zz_generated.deepcopy.go | 7 +- .../zz_generated.swagger_doc_generated.go | 1 + .../typed/cluster/v1beta1/cluster_client.go | 10 + .../v1beta1/fake/fake_cluster_client.go | 8 + .../v1beta1/fake/fake_managedclusterset.go | 117 ++ .../fake/fake_managedclustersetbinding.go | 126 ++ .../cluster/v1beta1/generated_expansion.go | 4 + .../cluster/v1beta1/managedclusterset.go | 168 ++ .../v1beta1/managedclustersetbinding.go | 179 ++ .../cluster/v1beta1/interface.go | 14 + .../cluster/v1beta1/managedclusterset.go | 73 + .../v1beta1/managedclustersetbinding.go | 74 + .../informers/externalversions/generic.go | 4 + .../cluster/v1beta1/expansion_generated.go | 12 + .../cluster/v1beta1/managedclusterset.go | 52 + .../v1beta1/managedclustersetbinding.go | 83 + cluster/v1alpha1/helpers.go | 295 +-- cluster/v1alpha1/helpers_test.go | 1858 +++++++++++++---- cluster/v1alpha1/zz_generated.deepcopy.go | 41 +- ...-management.io_managedclustersets.crd.yaml | 207 ++ ...ment.io_managedclustersetbindings.crd.yaml | 136 ++ cluster/v1beta1/helpers.go | 103 + cluster/v1beta1/helpers_test.go | 464 ++++ cluster/v1beta1/register.go | 4 + cluster/v1beta1/types_managedclusterset.go | 99 + .../v1beta1/types_managedclustersetbinding.go | 65 + cluster/v1beta1/zz_generated.deepcopy.go | 222 ++ .../zz_generated.swagger_doc_generated.go | 86 + ...-management.io_managedclustersets.crd.yaml | 189 +- ...ment.io_managedclustersetbindings.crd.yaml | 107 +- hack/update-deepcopy.sh | 5 +- 35 files changed, 4261 insertions(+), 573 deletions(-) create mode 100644 client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclusterset.go create mode 100644 client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclustersetbinding.go create mode 100644 client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go create mode 100644 client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go create mode 100644 client/cluster/informers/externalversions/cluster/v1beta1/managedclusterset.go create mode 100644 client/cluster/informers/externalversions/cluster/v1beta1/managedclustersetbinding.go create mode 100644 client/cluster/listers/cluster/v1beta1/managedclusterset.go create mode 100644 client/cluster/listers/cluster/v1beta1/managedclustersetbinding.go create mode 100644 
cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml create mode 100644 cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml create mode 100644 cluster/v1beta1/types_managedclusterset.go create mode 100644 cluster/v1beta1/types_managedclustersetbinding.go diff --git a/Makefile b/Makefile index 23ec0bd82..06923f0bb 100644 --- a/Makefile +++ b/Makefile @@ -50,11 +50,14 @@ verify-scripts: bash -x hack/verify-crds.sh bash -x hack/verify-codegen.sh .PHONY: verify-scripts -# verify: check-env verify-scripts verify-codegen-crds verify-gocilint -verify: verify-gocilint +verify: check-env verify-scripts verify-codegen-crds verify-gocilint update-scripts: hack/update-deepcopy.sh + # Using controller-gen as a workaround for cluster:v1alpha1 because gengo + # isn't respecting deepcopy-gen:false nor does it support generics + # Issue: https://github.com/kubernetes/gengo/issues/225 + $(CONTROLLER_GEN) object:headerFile="hack/empty.txt" paths="./cluster/v1alpha1" hack/update-swagger-docs.sh hack/update-codegen.sh hack/update-v1beta1-crds.sh @@ -71,7 +74,7 @@ include ./test/integration-test.mk check-env: ifeq ($(GOPATH),) - $(warning "environment variable GOPATH is empty, auto set from go env GOPATH") + $(warning "environment variable GOPATH is empty, auto set from go env GOPATH") export GOPATH=$(shell go env GOPATH) endif .PHONY: check-env diff --git a/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml b/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml index 69709fbd5..c60840150 100644 --- a/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml +++ b/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml @@ -129,6 +129,12 @@ spec: description: ProxyConfig holds proxy settings for add-on agent on the managed cluster. Empty means no proxy settings is available. properties: + caBundle: + description: CABundle is a CA certificate bundle to verify the + proxy server. And it's only useful when HTTPSProxy is set and + a HTTPS proxy server is specified. + format: byte + type: string httpProxy: description: HTTPProxy is the URL of the proxy for HTTP requests type: string diff --git a/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml b/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml index 62e016b95..6a063e773 100644 --- a/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml +++ b/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml @@ -10,6 +10,7 @@ spec: listKind: AddOnTemplateList plural: addontemplates singular: addontemplate + preserveUnknownFields: false scope: Cluster versions: - additionalPrinterColumns: diff --git a/addon/v1alpha1/types_addondeploymentconfig.go b/addon/v1alpha1/types_addondeploymentconfig.go index b72ac3a4c..472fbeb1e 100644 --- a/addon/v1alpha1/types_addondeploymentconfig.go +++ b/addon/v1alpha1/types_addondeploymentconfig.go @@ -111,6 +111,11 @@ type ProxyConfig struct { // +optional HTTPSProxy string `json:"httpsProxy,omitempty"` + // CABundle is a CA certificate bundle to verify the proxy server. + // And it's only useful when HTTPSProxy is set and a HTTPS proxy server is specified. 
+ // +optional + CABundle []byte `json:"caBundle,omitempty"` + // NoProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy // should not be used. // +optional diff --git a/addon/v1alpha1/zz_generated.deepcopy.go b/addon/v1alpha1/zz_generated.deepcopy.go index f4e7e2ede..c0f0917e5 100644 --- a/addon/v1alpha1/zz_generated.deepcopy.go +++ b/addon/v1alpha1/zz_generated.deepcopy.go @@ -107,7 +107,7 @@ func (in *AddOnDeploymentConfigSpec) DeepCopyInto(out *AddOnDeploymentConfigSpec *out = make([]ImageMirror, len(*in)) copy(*out, *in) } - out.ProxyConfig = in.ProxyConfig + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) return } @@ -916,6 +916,11 @@ func (in *PlacementStrategy) DeepCopy() *PlacementStrategy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } return } diff --git a/addon/v1alpha1/zz_generated.swagger_doc_generated.go b/addon/v1alpha1/zz_generated.swagger_doc_generated.go index 148e1b36c..95953b253 100644 --- a/addon/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/addon/v1alpha1/zz_generated.swagger_doc_generated.go @@ -76,6 +76,7 @@ var map_ProxyConfig = map[string]string{ "": "ProxyConfig describes the proxy settings for the add-on agent", "httpProxy": "HTTPProxy is the URL of the proxy for HTTP requests", "httpsProxy": "HTTPSProxy is the URL of the proxy for HTTPS requests", + "caBundle": "CABundle is a CA certificate bundle to verify the proxy server. And it's only useful when HTTPSProxy is set and a HTTPS proxy server is specified.", "noProxy": "NoProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.", } diff --git a/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go b/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go index 42dae5bdd..552027b3d 100644 --- a/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go +++ b/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go @@ -12,6 +12,8 @@ import ( type ClusterV1beta1Interface interface { RESTClient() rest.Interface + ManagedClusterSetsGetter + ManagedClusterSetBindingsGetter PlacementsGetter PlacementDecisionsGetter } @@ -21,6 +23,14 @@ type ClusterV1beta1Client struct { restClient rest.Interface } +func (c *ClusterV1beta1Client) ManagedClusterSets() ManagedClusterSetInterface { + return newManagedClusterSets(c) +} + +func (c *ClusterV1beta1Client) ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingInterface { + return newManagedClusterSetBindings(c, namespace) +} + func (c *ClusterV1beta1Client) Placements(namespace string) PlacementInterface { return newPlacements(c, namespace) } diff --git a/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_cluster_client.go b/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_cluster_client.go index d96dad67f..5bd66bb8e 100644 --- a/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_cluster_client.go +++ b/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_cluster_client.go @@ -12,6 +12,14 @@ type FakeClusterV1beta1 struct { *testing.Fake } +func (c *FakeClusterV1beta1) ManagedClusterSets() v1beta1.ManagedClusterSetInterface { + return &FakeManagedClusterSets{c} +} + +func (c 
*FakeClusterV1beta1) ManagedClusterSetBindings(namespace string) v1beta1.ManagedClusterSetBindingInterface { + return &FakeManagedClusterSetBindings{c, namespace} +} + func (c *FakeClusterV1beta1) Placements(namespace string) v1beta1.PlacementInterface { return &FakePlacements{c, namespace} } diff --git a/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclusterset.go b/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclusterset.go new file mode 100644 index 000000000..35ddbfed9 --- /dev/null +++ b/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclusterset.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// FakeManagedClusterSets implements ManagedClusterSetInterface +type FakeManagedClusterSets struct { + Fake *FakeClusterV1beta1 +} + +var managedclustersetsResource = schema.GroupVersionResource{Group: "cluster.open-cluster-management.io", Version: "v1beta1", Resource: "managedclustersets"} + +var managedclustersetsKind = schema.GroupVersionKind{Group: "cluster.open-cluster-management.io", Version: "v1beta1", Kind: "ManagedClusterSet"} + +// Get takes name of the managedClusterSet, and returns the corresponding managedClusterSet object, and an error if there is any. +func (c *FakeManagedClusterSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ManagedClusterSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(managedclustersetsResource, name), &v1beta1.ManagedClusterSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSet), err +} + +// List takes label and field selectors, and returns the list of ManagedClusterSets that match those selectors. +func (c *FakeManagedClusterSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ManagedClusterSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(managedclustersetsResource, managedclustersetsKind, opts), &v1beta1.ManagedClusterSetList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ManagedClusterSetList{ListMeta: obj.(*v1beta1.ManagedClusterSetList).ListMeta} + for _, item := range obj.(*v1beta1.ManagedClusterSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested managedClusterSets. +func (c *FakeManagedClusterSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(managedclustersetsResource, opts)) +} + +// Create takes the representation of a managedClusterSet and creates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. +func (c *FakeManagedClusterSets) Create(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.CreateOptions) (result *v1beta1.ManagedClusterSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(managedclustersetsResource, managedClusterSet), &v1beta1.ManagedClusterSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSet), err +} + +// Update takes the representation of a managedClusterSet and updates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. +func (c *FakeManagedClusterSets) Update(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(managedclustersetsResource, managedClusterSet), &v1beta1.ManagedClusterSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeManagedClusterSets) UpdateStatus(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSet, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(managedclustersetsResource, "status", managedClusterSet), &v1beta1.ManagedClusterSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSet), err +} + +// Delete takes name of the managedClusterSet and deletes it. Returns an error if one occurs. +func (c *FakeManagedClusterSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(managedclustersetsResource, name, opts), &v1beta1.ManagedClusterSet{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeManagedClusterSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(managedclustersetsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.ManagedClusterSetList{}) + return err +} + +// Patch applies the patch and returns the patched managedClusterSet. +func (c *FakeManagedClusterSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(managedclustersetsResource, name, pt, data, subresources...), &v1beta1.ManagedClusterSet{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSet), err +} diff --git a/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclustersetbinding.go b/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclustersetbinding.go new file mode 100644 index 000000000..7b1cf5929 --- /dev/null +++ b/client/cluster/clientset/versioned/typed/cluster/v1beta1/fake/fake_managedclustersetbinding.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// FakeManagedClusterSetBindings implements ManagedClusterSetBindingInterface +type FakeManagedClusterSetBindings struct { + Fake *FakeClusterV1beta1 + ns string +} + +var managedclustersetbindingsResource = schema.GroupVersionResource{Group: "cluster.open-cluster-management.io", Version: "v1beta1", Resource: "managedclustersetbindings"} + +var managedclustersetbindingsKind = schema.GroupVersionKind{Group: "cluster.open-cluster-management.io", Version: "v1beta1", Kind: "ManagedClusterSetBinding"} + +// Get takes name of the managedClusterSetBinding, and returns the corresponding managedClusterSetBinding object, and an error if there is any. +func (c *FakeManagedClusterSetBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(managedclustersetbindingsResource, c.ns, name), &v1beta1.ManagedClusterSetBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSetBinding), err +} + +// List takes label and field selectors, and returns the list of ManagedClusterSetBindings that match those selectors. +func (c *FakeManagedClusterSetBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ManagedClusterSetBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(managedclustersetbindingsResource, managedclustersetbindingsKind, c.ns, opts), &v1beta1.ManagedClusterSetBindingList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ManagedClusterSetBindingList{ListMeta: obj.(*v1beta1.ManagedClusterSetBindingList).ListMeta} + for _, item := range obj.(*v1beta1.ManagedClusterSetBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested managedClusterSetBindings. +func (c *FakeManagedClusterSetBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(managedclustersetbindingsResource, c.ns, opts)) + +} + +// Create takes the representation of a managedClusterSetBinding and creates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. +func (c *FakeManagedClusterSetBindings) Create(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.CreateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(managedclustersetbindingsResource, c.ns, managedClusterSetBinding), &v1beta1.ManagedClusterSetBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSetBinding), err +} + +// Update takes the representation of a managedClusterSetBinding and updates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. 
+func (c *FakeManagedClusterSetBindings) Update(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(managedclustersetbindingsResource, c.ns, managedClusterSetBinding), &v1beta1.ManagedClusterSetBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSetBinding), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeManagedClusterSetBindings) UpdateStatus(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSetBinding, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(managedclustersetbindingsResource, "status", c.ns, managedClusterSetBinding), &v1beta1.ManagedClusterSetBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSetBinding), err +} + +// Delete takes name of the managedClusterSetBinding and deletes it. Returns an error if one occurs. +func (c *FakeManagedClusterSetBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(managedclustersetbindingsResource, c.ns, name, opts), &v1beta1.ManagedClusterSetBinding{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeManagedClusterSetBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(managedclustersetbindingsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.ManagedClusterSetBindingList{}) + return err +} + +// Patch applies the patch and returns the patched managedClusterSetBinding. +func (c *FakeManagedClusterSetBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSetBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(managedclustersetbindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.ManagedClusterSetBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ManagedClusterSetBinding), err +} diff --git a/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go b/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go index 701affa7d..a514c2160 100644 --- a/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go +++ b/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go @@ -2,6 +2,10 @@ package v1beta1 +type ManagedClusterSetExpansion interface{} + +type ManagedClusterSetBindingExpansion interface{} + type PlacementExpansion interface{} type PlacementDecisionExpansion interface{} diff --git a/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go b/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go new file mode 100644 index 000000000..55fafd650 --- /dev/null +++ b/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" + v1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// ManagedClusterSetsGetter has a method to return a ManagedClusterSetInterface. +// A group's client should implement this interface. +type ManagedClusterSetsGetter interface { + ManagedClusterSets() ManagedClusterSetInterface +} + +// ManagedClusterSetInterface has methods to work with ManagedClusterSet resources. +type ManagedClusterSetInterface interface { + Create(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.CreateOptions) (*v1beta1.ManagedClusterSet, error) + Update(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSet, error) + UpdateStatus(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSet, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ManagedClusterSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ManagedClusterSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSet, err error) + ManagedClusterSetExpansion +} + +// managedClusterSets implements ManagedClusterSetInterface +type managedClusterSets struct { + client rest.Interface +} + +// newManagedClusterSets returns a ManagedClusterSets +func newManagedClusterSets(c *ClusterV1beta1Client) *managedClusterSets { + return &managedClusterSets{ + client: c.RESTClient(), + } +} + +// Get takes name of the managedClusterSet, and returns the corresponding managedClusterSet object, and an error if there is any. +func (c *managedClusterSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ManagedClusterSet, err error) { + result = &v1beta1.ManagedClusterSet{} + err = c.client.Get(). + Resource("managedclustersets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ManagedClusterSets that match those selectors. +func (c *managedClusterSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ManagedClusterSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ManagedClusterSetList{} + err = c.client.Get(). + Resource("managedclustersets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested managedClusterSets. +func (c *managedClusterSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("managedclustersets"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a managedClusterSet and creates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. +func (c *managedClusterSets) Create(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.CreateOptions) (result *v1beta1.ManagedClusterSet, err error) { + result = &v1beta1.ManagedClusterSet{} + err = c.client.Post(). + Resource("managedclustersets"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedClusterSet). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a managedClusterSet and updates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. +func (c *managedClusterSets) Update(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSet, err error) { + result = &v1beta1.ManagedClusterSet{} + err = c.client.Put(). + Resource("managedclustersets"). + Name(managedClusterSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedClusterSet). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *managedClusterSets) UpdateStatus(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSet, err error) { + result = &v1beta1.ManagedClusterSet{} + err = c.client.Put(). + Resource("managedclustersets"). + Name(managedClusterSet.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedClusterSet). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the managedClusterSet and deletes it. Returns an error if one occurs. +func (c *managedClusterSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("managedclustersets"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *managedClusterSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("managedclustersets"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched managedClusterSet. +func (c *managedClusterSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSet, err error) { + result = &v1beta1.ManagedClusterSet{} + err = c.client.Patch(pt). + Resource("managedclustersets"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go b/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go new file mode 100644 index 000000000..9f81b120f --- /dev/null +++ b/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" + v1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// ManagedClusterSetBindingsGetter has a method to return a ManagedClusterSetBindingInterface. +// A group's client should implement this interface. +type ManagedClusterSetBindingsGetter interface { + ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingInterface +} + +// ManagedClusterSetBindingInterface has methods to work with ManagedClusterSetBinding resources. +type ManagedClusterSetBindingInterface interface { + Create(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.CreateOptions) (*v1beta1.ManagedClusterSetBinding, error) + Update(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSetBinding, error) + UpdateStatus(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSetBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ManagedClusterSetBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ManagedClusterSetBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSetBinding, err error) + ManagedClusterSetBindingExpansion +} + +// managedClusterSetBindings implements ManagedClusterSetBindingInterface +type managedClusterSetBindings struct { + client rest.Interface + ns string +} + +// newManagedClusterSetBindings returns a ManagedClusterSetBindings +func newManagedClusterSetBindings(c *ClusterV1beta1Client, namespace string) *managedClusterSetBindings { + return &managedClusterSetBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the managedClusterSetBinding, and returns the corresponding managedClusterSetBinding object, and an error if there is any. +func (c *managedClusterSetBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { + result = &v1beta1.ManagedClusterSetBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ManagedClusterSetBindings that match those selectors. 
+func (c *managedClusterSetBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ManagedClusterSetBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ManagedClusterSetBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested managedClusterSetBindings. +func (c *managedClusterSetBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a managedClusterSetBinding and creates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. +func (c *managedClusterSetBindings) Create(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.CreateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { + result = &v1beta1.ManagedClusterSetBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedClusterSetBinding). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a managedClusterSetBinding and updates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. +func (c *managedClusterSetBindings) Update(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { + result = &v1beta1.ManagedClusterSetBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + Name(managedClusterSetBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedClusterSetBinding). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *managedClusterSetBindings) UpdateStatus(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { + result = &v1beta1.ManagedClusterSetBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + Name(managedClusterSetBinding.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedClusterSetBinding). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the managedClusterSetBinding and deletes it. Returns an error if one occurs. +func (c *managedClusterSetBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *managedClusterSetBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("managedclustersetbindings"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched managedClusterSetBinding. +func (c *managedClusterSetBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSetBinding, err error) { + result = &v1beta1.ManagedClusterSetBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("managedclustersetbindings"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/client/cluster/informers/externalversions/cluster/v1beta1/interface.go b/client/cluster/informers/externalversions/cluster/v1beta1/interface.go index e64212692..24ee82fb5 100644 --- a/client/cluster/informers/externalversions/cluster/v1beta1/interface.go +++ b/client/cluster/informers/externalversions/cluster/v1beta1/interface.go @@ -8,6 +8,10 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // ManagedClusterSets returns a ManagedClusterSetInformer. + ManagedClusterSets() ManagedClusterSetInformer + // ManagedClusterSetBindings returns a ManagedClusterSetBindingInformer. + ManagedClusterSetBindings() ManagedClusterSetBindingInformer // Placements returns a PlacementInformer. Placements() PlacementInformer // PlacementDecisions returns a PlacementDecisionInformer. @@ -25,6 +29,16 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ManagedClusterSets returns a ManagedClusterSetInformer. +func (v *version) ManagedClusterSets() ManagedClusterSetInformer { + return &managedClusterSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ManagedClusterSetBindings returns a ManagedClusterSetBindingInformer. +func (v *version) ManagedClusterSetBindings() ManagedClusterSetBindingInformer { + return &managedClusterSetBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Placements returns a PlacementInformer. func (v *version) Placements() PlacementInformer { return &placementInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/client/cluster/informers/externalversions/cluster/v1beta1/managedclusterset.go b/client/cluster/informers/externalversions/cluster/v1beta1/managedclusterset.go new file mode 100644 index 000000000..237b058dc --- /dev/null +++ b/client/cluster/informers/externalversions/cluster/v1beta1/managedclusterset.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "open-cluster-management.io/api/client/cluster/clientset/versioned" + internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" + v1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// ManagedClusterSetInformer provides access to a shared informer and lister for +// ManagedClusterSets. +type ManagedClusterSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ManagedClusterSetLister +} + +type managedClusterSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewManagedClusterSetInformer constructs a new informer for ManagedClusterSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewManagedClusterSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredManagedClusterSetInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredManagedClusterSetInformer constructs a new informer for ManagedClusterSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredManagedClusterSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterV1beta1().ManagedClusterSets().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterV1beta1().ManagedClusterSets().Watch(context.TODO(), options) + }, + }, + &clusterv1beta1.ManagedClusterSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *managedClusterSetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredManagedClusterSetInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *managedClusterSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&clusterv1beta1.ManagedClusterSet{}, f.defaultInformer) +} + +func (f *managedClusterSetInformer) Lister() v1beta1.ManagedClusterSetLister { + return v1beta1.NewManagedClusterSetLister(f.Informer().GetIndexer()) +} diff --git a/client/cluster/informers/externalversions/cluster/v1beta1/managedclustersetbinding.go b/client/cluster/informers/externalversions/cluster/v1beta1/managedclustersetbinding.go new file mode 100644 index 000000000..b552b5a30 --- /dev/null +++ b/client/cluster/informers/externalversions/cluster/v1beta1/managedclustersetbinding.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. 
DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "open-cluster-management.io/api/client/cluster/clientset/versioned" + internalinterfaces "open-cluster-management.io/api/client/cluster/informers/externalversions/internalinterfaces" + v1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// ManagedClusterSetBindingInformer provides access to a shared informer and lister for +// ManagedClusterSetBindings. +type ManagedClusterSetBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ManagedClusterSetBindingLister +} + +type managedClusterSetBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewManagedClusterSetBindingInformer constructs a new informer for ManagedClusterSetBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewManagedClusterSetBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredManagedClusterSetBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredManagedClusterSetBindingInformer constructs a new informer for ManagedClusterSetBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredManagedClusterSetBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterV1beta1().ManagedClusterSetBindings(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterV1beta1().ManagedClusterSetBindings(namespace).Watch(context.TODO(), options) + }, + }, + &clusterv1beta1.ManagedClusterSetBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *managedClusterSetBindingInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredManagedClusterSetBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *managedClusterSetBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&clusterv1beta1.ManagedClusterSetBinding{}, f.defaultInformer) +} + +func (f *managedClusterSetBindingInformer) Lister() v1beta1.ManagedClusterSetBindingLister { + return v1beta1.NewManagedClusterSetBindingLister(f.Informer().GetIndexer()) +} diff --git a/client/cluster/informers/externalversions/generic.go b/client/cluster/informers/externalversions/generic.go index a6ccc7332..6956a771d 100644 --- a/client/cluster/informers/externalversions/generic.go +++ b/client/cluster/informers/externalversions/generic.go @@ -50,6 +50,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().ClusterClaims().Informer()}, nil // Group=cluster.open-cluster-management.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("managedclustersets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1beta1().ManagedClusterSets().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("managedclustersetbindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1beta1().ManagedClusterSetBindings().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("placements"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1beta1().Placements().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("placementdecisions"): diff --git a/client/cluster/listers/cluster/v1beta1/expansion_generated.go b/client/cluster/listers/cluster/v1beta1/expansion_generated.go index 376343dc2..c90981a28 100644 --- a/client/cluster/listers/cluster/v1beta1/expansion_generated.go +++ b/client/cluster/listers/cluster/v1beta1/expansion_generated.go @@ -2,6 +2,18 @@ package v1beta1 +// ManagedClusterSetListerExpansion allows custom methods to be added to +// ManagedClusterSetLister. +type ManagedClusterSetListerExpansion interface{} + +// ManagedClusterSetBindingListerExpansion allows custom methods to be added to +// ManagedClusterSetBindingLister. 
+type ManagedClusterSetBindingListerExpansion interface{} + +// ManagedClusterSetBindingNamespaceListerExpansion allows custom methods to be added to +// ManagedClusterSetBindingNamespaceLister. +type ManagedClusterSetBindingNamespaceListerExpansion interface{} + // PlacementListerExpansion allows custom methods to be added to // PlacementLister. type PlacementListerExpansion interface{} diff --git a/client/cluster/listers/cluster/v1beta1/managedclusterset.go b/client/cluster/listers/cluster/v1beta1/managedclusterset.go new file mode 100644 index 000000000..331aa59b4 --- /dev/null +++ b/client/cluster/listers/cluster/v1beta1/managedclusterset.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// ManagedClusterSetLister helps list ManagedClusterSets. +// All objects returned here must be treated as read-only. +type ManagedClusterSetLister interface { + // List lists all ManagedClusterSets in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ManagedClusterSet, err error) + // Get retrieves the ManagedClusterSet from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.ManagedClusterSet, error) + ManagedClusterSetListerExpansion +} + +// managedClusterSetLister implements the ManagedClusterSetLister interface. +type managedClusterSetLister struct { + indexer cache.Indexer +} + +// NewManagedClusterSetLister returns a new ManagedClusterSetLister. +func NewManagedClusterSetLister(indexer cache.Indexer) ManagedClusterSetLister { + return &managedClusterSetLister{indexer: indexer} +} + +// List lists all ManagedClusterSets in the indexer. +func (s *managedClusterSetLister) List(selector labels.Selector) (ret []*v1beta1.ManagedClusterSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ManagedClusterSet)) + }) + return ret, err +} + +// Get retrieves the ManagedClusterSet from the index for a given name. +func (s *managedClusterSetLister) Get(name string) (*v1beta1.ManagedClusterSet, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("managedclusterset"), name) + } + return obj.(*v1beta1.ManagedClusterSet), nil +} diff --git a/client/cluster/listers/cluster/v1beta1/managedclustersetbinding.go b/client/cluster/listers/cluster/v1beta1/managedclustersetbinding.go new file mode 100644 index 000000000..f7a1e301c --- /dev/null +++ b/client/cluster/listers/cluster/v1beta1/managedclustersetbinding.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +// ManagedClusterSetBindingLister helps list ManagedClusterSetBindings. +// All objects returned here must be treated as read-only. +type ManagedClusterSetBindingLister interface { + // List lists all ManagedClusterSetBindings in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1beta1.ManagedClusterSetBinding, err error) + // ManagedClusterSetBindings returns an object that can list and get ManagedClusterSetBindings. + ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingNamespaceLister + ManagedClusterSetBindingListerExpansion +} + +// managedClusterSetBindingLister implements the ManagedClusterSetBindingLister interface. +type managedClusterSetBindingLister struct { + indexer cache.Indexer +} + +// NewManagedClusterSetBindingLister returns a new ManagedClusterSetBindingLister. +func NewManagedClusterSetBindingLister(indexer cache.Indexer) ManagedClusterSetBindingLister { + return &managedClusterSetBindingLister{indexer: indexer} +} + +// List lists all ManagedClusterSetBindings in the indexer. +func (s *managedClusterSetBindingLister) List(selector labels.Selector) (ret []*v1beta1.ManagedClusterSetBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ManagedClusterSetBinding)) + }) + return ret, err +} + +// ManagedClusterSetBindings returns an object that can list and get ManagedClusterSetBindings. +func (s *managedClusterSetBindingLister) ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingNamespaceLister { + return managedClusterSetBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ManagedClusterSetBindingNamespaceLister helps list and get ManagedClusterSetBindings. +// All objects returned here must be treated as read-only. +type ManagedClusterSetBindingNamespaceLister interface { + // List lists all ManagedClusterSetBindings in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ManagedClusterSetBinding, err error) + // Get retrieves the ManagedClusterSetBinding from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.ManagedClusterSetBinding, error) + ManagedClusterSetBindingNamespaceListerExpansion +} + +// managedClusterSetBindingNamespaceLister implements the ManagedClusterSetBindingNamespaceLister +// interface. +type managedClusterSetBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ManagedClusterSetBindings in the indexer for a given namespace. +func (s managedClusterSetBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ManagedClusterSetBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ManagedClusterSetBinding)) + }) + return ret, err +} + +// Get retrieves the ManagedClusterSetBinding from the indexer for a given namespace and name. +func (s managedClusterSetBindingNamespaceLister) Get(name string) (*v1beta1.ManagedClusterSetBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("managedclustersetbinding"), name) + } + return obj.(*v1beta1.ManagedClusterSetBinding), nil +} diff --git a/cluster/v1alpha1/helpers.go b/cluster/v1alpha1/helpers.go index d70865825..7af8bd1b7 100644 --- a/cluster/v1alpha1/helpers.go +++ b/cluster/v1alpha1/helpers.go @@ -37,11 +37,10 @@ const ( Skip ) -// ClusterRolloutStatusFunc defines a function to return the rollout status for a managed cluster. 
-type ClusterRolloutStatusFunc func(clusterName string) ClusterRolloutStatus - // ClusterRolloutStatus holds the rollout status information for a cluster. type ClusterRolloutStatus struct { + // cluster name + ClusterName string // GroupKey represents the cluster group key (optional field). GroupKey clusterv1beta1.GroupKey // Status is the required field indicating the rollout status. @@ -53,50 +52,58 @@ type ClusterRolloutStatus struct { TimeOutTime *metav1.Time } -// RolloutResult contains the clusters to be rolled out and the clusters that have timed out. +// RolloutResult contains list of clusters that are timeOut, removed and required to rollOut type RolloutResult struct { - // ClustersToRollout is a map where the key is the cluster name and the value is the ClusterRolloutStatus. - ClustersToRollout map[string]ClusterRolloutStatus - // ClustersTimeOut is a map where the key is the cluster name and the value is the ClusterRolloutStatus. - ClustersTimeOut map[string]ClusterRolloutStatus + // ClustersToRollout is a slice of ClusterRolloutStatus that will be rolled out. + ClustersToRollout []ClusterRolloutStatus + // ClustersTimeOut is a slice of ClusterRolloutStatus that are timeout. + ClustersTimeOut []ClusterRolloutStatus + // ClustersRemoved is a slice of ClusterRolloutStatus that are removed. + ClustersRemoved []ClusterRolloutStatus } +// ClusterRolloutStatusFunc defines a function that return the rollout status for a given workload. +// +k8s:deepcopy-gen=false +type ClusterRolloutStatusFunc[T any] func(clusterName string, workload T) (ClusterRolloutStatus, error) + +// The RolloutHandler required workload type (interface/struct) to be assigned to the generic type. +// The custom implementation of the ClusterRolloutStatusFunc is required to use the RolloutHandler. // +k8s:deepcopy-gen=false -type RolloutHandler struct { +type RolloutHandler[T any] struct { // placement decision tracker - pdTracker *clusterv1beta1.PlacementDecisionClustersTracker + pdTracker *clusterv1beta1.PlacementDecisionClustersTracker + statusFunc ClusterRolloutStatusFunc[T] } -func NewRolloutHandler(pdTracker *clusterv1beta1.PlacementDecisionClustersTracker) (*RolloutHandler, error) { +// NewRolloutHandler creates a new RolloutHandler with the give workload type. +func NewRolloutHandler[T any](pdTracker *clusterv1beta1.PlacementDecisionClustersTracker, statusFunc ClusterRolloutStatusFunc[T]) (*RolloutHandler[T], error) { if pdTracker == nil { return nil, fmt.Errorf("invalid placement decision tracker %v", pdTracker) } - return &RolloutHandler{pdTracker: pdTracker}, nil + return &RolloutHandler[T]{pdTracker: pdTracker, statusFunc: statusFunc}, nil } -// The input is a duck type RolloutStrategy and a ClusterRolloutStatusFunc to return the rollout status on each managed cluster. -// Return the strategy actual take effect and a list of clusters that need to rollout and that are timeout. -// -// ClustersToRollout: If mandatory decision groups are defined in strategy, will return the clusters to rollout in mandatory decision groups first. -// When all the mandatory decision groups rollout successfully, will return the rest of the clusters that need to rollout. +// The input are a RolloutStrategy and existingClusterRolloutStatus list. +// The existing ClusterRolloutStatus list should be created using the ClusterRolloutStatusFunc to determine the current workload rollout status. 
+// The existing ClusterRolloutStatus list should contain the current rollout status of all workloads, such as ToApply, Progressing, Succeeded,
+// Failed, TimeOut and Skip, so that the added, removed, and timed-out clusters, as well as the next clusters to roll out, can be determined.
 //
-// ClustersTimeOut: If the cluster status is Progressing or Failed, and the status lasts longer than timeout defined in strategy,
-// will list them RolloutResult.ClustersTimeOut with status TimeOut.
-func (r *RolloutHandler) GetRolloutCluster(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) {
+// Returns the RolloutStrategy that actually takes effect and a RolloutResult containing the ClustersToRollout, ClustersTimeOut and ClustersRemoved lists.
+func (r *RolloutHandler[T]) GetRolloutCluster(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) {
 	switch rolloutStrategy.Type {
 	case All:
-		return r.getRolloutAllClusters(rolloutStrategy, statusFunc)
+		return r.getRolloutAllClusters(rolloutStrategy, existingClusterStatus)
 	case Progressive:
-		return r.getProgressiveClusters(rolloutStrategy, statusFunc)
+		return r.getProgressiveClusters(rolloutStrategy, existingClusterStatus)
 	case ProgressivePerGroup:
-		return r.getProgressivePerGroupClusters(rolloutStrategy, statusFunc)
+		return r.getProgressivePerGroupClusters(rolloutStrategy, existingClusterStatus)
 	default:
 		return nil, RolloutResult{}, fmt.Errorf("incorrect rollout strategy type %v", rolloutStrategy.Type)
 	}
 }
 
-func (r *RolloutHandler) getRolloutAllClusters(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) {
+func (r *RolloutHandler[T]) getRolloutAllClusters(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) {
 	// Prepare the rollout strategy
 	strategy := RolloutStrategy{Type: All}
 	strategy.All = rolloutStrategy.All.DeepCopy()
@@ -110,15 +117,18 @@ func (r *RolloutHandler) getRolloutAllClusters(rolloutStrategy RolloutStrategy,
 		return &strategy, RolloutResult{}, err
 	}
 
-	// Get all clusters and perform progressive rollout
-	totalClusterGroups := r.pdTracker.ExistingClusterGroupsBesides()
-	totalClusters := totalClusterGroups.GetClusters().UnsortedList()
-	rolloutResult := progressivePerCluster(totalClusterGroups, len(totalClusters), failureTimeout, statusFunc)
+	allClusterGroups := r.pdTracker.ExistingClusterGroupsBesides()
+	allClusters := allClusterGroups.GetClusters().UnsortedList()
+
+	// Check for removed clusters
+	currentClusterStatus, removedClusterStatus := r.getRemovedClusters(allClusterGroups, existingClusterStatus)
+	rolloutResult := progressivePerCluster(allClusterGroups, len(allClusters), failureTimeout, currentClusterStatus)
+	rolloutResult.ClustersRemoved = removedClusterStatus
 
 	return &strategy, rolloutResult, nil
 }
 
-func (r *RolloutHandler) getProgressiveClusters(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) {
+func (r *RolloutHandler[T]) getProgressiveClusters(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) {
 	// Prepare the rollout strategy
 	strategy := RolloutStrategy{Type: Progressive}
 	strategy.Progressive = rolloutStrategy.Progressive.DeepCopy()
@@ -126,37 +136,45 @@ func (r *RolloutHandler) getProgressiveClusters(rolloutStrategy RolloutStrategy,
 		strategy.Progressive = &RolloutProgressive{}
 	}
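For illustration only, a minimal sketch of how a consumer might wire the new generic API together, assuming the snippet sits in this cluster/v1alpha1 package next to the types above. The workload type (appWorkload), its fields, and the status mapping are hypothetical; RolloutHandler, ClusterRolloutStatusFunc, RolloutStrategy, and GetRolloutCluster are the identifiers introduced in this change.

// Sketch only: appWorkload and applyStatusFunc are invented examples of the
// workload type T and the ClusterRolloutStatusFunc a consumer must provide.
type appWorkload struct {
	ClusterName string
	Applied     bool
	LastApplied *metav1.Time
}

func applyStatusFunc(clusterName string, workload appWorkload) (ClusterRolloutStatus, error) {
	status := ClusterRolloutStatus{ClusterName: clusterName, LastTransitionTime: workload.LastApplied}
	if workload.Applied {
		status.Status = Succeeded
	} else {
		status.Status = Progressing
	}
	return status, nil
}

func exampleRollout(tracker *clusterv1beta1.PlacementDecisionClustersTracker, workloads []appWorkload) (RolloutResult, error) {
	handler, err := NewRolloutHandler[appWorkload](tracker, applyStatusFunc)
	if err != nil {
		return RolloutResult{}, err
	}

	// Build the existing status list from the workloads that are already deployed.
	existing := []ClusterRolloutStatus{}
	for _, w := range workloads {
		s, err := applyStatusFunc(w.ClusterName, w)
		if err != nil {
			return RolloutResult{}, err
		}
		existing = append(existing, s)
	}

	strategy := RolloutStrategy{
		Type: Progressive,
		Progressive: &RolloutProgressive{
			Timeout:        Timeout{"90s"},
			MaxConcurrency: intstr.FromInt(2),
		},
	}

	// The returned RolloutResult carries ClustersToRollout, ClustersTimeOut and ClustersRemoved.
	_, result, err := handler.GetRolloutCluster(strategy, existing)
	return result, err
}

Passing pre-computed statuses (rather than the earlier per-cluster callback) is what allows the handler to also report clusters that no longer appear in the placement decisions via ClustersRemoved.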
-	// Upgrade mandatory decision groups first
-	groupKeys := decisionGroupsToGroupKeys(strategy.Progressive.MandatoryDecisionGroups.MandatoryDecisionGroups)
-	clusterGroups := r.pdTracker.ExistingClusterGroups(groupKeys...)
-
-	// Perform progressive rollout for mandatory decision groups
-	rolloutResult := progressivePerGroup(clusterGroups, maxTimeDuration, statusFunc)
-	if len(rolloutResult.ClustersToRollout) > 0 {
-		return &strategy, rolloutResult, nil
-	}
-
 	// Parse timeout for non-mandatory decision groups
 	failureTimeout, err := parseTimeout(strategy.Progressive.Timeout.Timeout)
 	if err != nil {
 		return &strategy, RolloutResult{}, err
 	}
 
-	// Calculate the length for progressive rollout
-	totalClusters := r.pdTracker.ExistingClusterGroupsBesides().GetClusters()
-	length, err := calculateLength(strategy.Progressive.MaxConcurrency, len(totalClusters))
+	// Check for removed clusters
+	clusterGroups := r.pdTracker.ExistingClusterGroupsBesides()
+	currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus)
+
+	// Upgrade mandatory decision groups first
+	groupKeys := decisionGroupsToGroupKeys(strategy.Progressive.MandatoryDecisionGroups.MandatoryDecisionGroups)
+	clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...)
+
+	// Perform progressive rollout for mandatory decision groups first.
+	if len(clusterGroups) > 0 {
+		rolloutResult := progressivePerGroup(clusterGroups, failureTimeout, currentClusterStatus)
+		if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 {
+			rolloutResult.ClustersRemoved = removedClusterStatus
+			return &strategy, rolloutResult, nil
+		}
+	}
+
+	// Calculate the size of the progressive rollout.
+	// If MaxConcurrency is not defined, the total number of clusters is used as the max concurrency.
+	clusterGroups = r.pdTracker.ExistingClusterGroupsBesides(groupKeys...)
+	length, err := calculateRolloutSize(strategy.Progressive.MaxConcurrency, len(clusterGroups.GetClusters()))
 	if err != nil {
 		return &strategy, RolloutResult{}, err
 	}
 
-	// Upgrade the remaining clusters
-	restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(clusterGroups.GetOrderedGroupKeys()...)
- rolloutResult = progressivePerCluster(restClusterGroups, length, failureTimeout, statusFunc) + // Rollout the remaining clusters + rolloutResult := progressivePerCluster(clusterGroups, length, failureTimeout, currentClusterStatus) + rolloutResult.ClustersRemoved = removedClusterStatus return &strategy, rolloutResult, nil } -func (r *RolloutHandler) getProgressivePerGroupClusters(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) { +func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) { // Prepare the rollout strategy strategy := RolloutStrategy{Type: ProgressivePerGroup} strategy.ProgressivePerGroup = rolloutStrategy.ProgressivePerGroup.DeepCopy() @@ -164,65 +182,99 @@ func (r *RolloutHandler) getProgressivePerGroupClusters(rolloutStrategy RolloutS strategy.ProgressivePerGroup = &RolloutProgressivePerGroup{} } + // Parse timeout for non-mandatory decision groups + failureTimeout, err := parseTimeout(strategy.ProgressivePerGroup.Timeout.Timeout) + if err != nil { + return &strategy, RolloutResult{}, err + } + + // Check for removed Clusters + clusterGroups := r.pdTracker.ExistingClusterGroupsBesides() + currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus) + // Upgrade mandatory decision groups first mandatoryDecisionGroups := strategy.ProgressivePerGroup.MandatoryDecisionGroups.MandatoryDecisionGroups groupKeys := decisionGroupsToGroupKeys(mandatoryDecisionGroups) - clusterGroups := r.pdTracker.ExistingClusterGroups(groupKeys...) + clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...) - // Perform progressive rollout per group for mandatory decision groups - rolloutResult := progressivePerGroup(clusterGroups, maxTimeDuration, statusFunc) - if len(rolloutResult.ClustersToRollout) > 0 { - return &strategy, rolloutResult, nil - } + // Perform progressive rollout per group for mandatory decision groups first + if len(clusterGroups) > 0 { + rolloutResult := progressivePerGroup(clusterGroups, failureTimeout, currentClusterStatus) - // Parse timeout for non-mandatory decision groups - failureTimeout, err := parseTimeout(strategy.ProgressivePerGroup.Timeout.Timeout) - if err != nil { - return &strategy, RolloutResult{}, err + if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 { + rolloutResult.ClustersRemoved = removedClusterStatus + return &strategy, rolloutResult, nil + } } - // Upgrade the rest of the decision groups - restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(clusterGroups.GetOrderedGroupKeys()...) + // RollOut the rest of the decision groups + restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(groupKeys...) 
// Perform progressive rollout per group for the remaining decision groups - rolloutResult = progressivePerGroup(restClusterGroups, failureTimeout, statusFunc) + rolloutResult := progressivePerGroup(restClusterGroups, failureTimeout, currentClusterStatus) + rolloutResult.ClustersRemoved = removedClusterStatus + return &strategy, rolloutResult, nil } -func progressivePerCluster(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, length int, timeout time.Duration, statusFunc ClusterRolloutStatusFunc) RolloutResult { - rolloutClusters := map[string]ClusterRolloutStatus{} - timeoutClusters := map[string]ClusterRolloutStatus{} +func (r *RolloutHandler[T]) getRemovedClusters(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, existingClusterStatus []ClusterRolloutStatus) ([]ClusterRolloutStatus, []ClusterRolloutStatus) { + var currentClusterStatus, removedClusterStatus []ClusterRolloutStatus + + clusters := clusterGroupsMap.GetClusters().UnsortedList() + for _, clusterStatus := range existingClusterStatus { + exist := false + for _, cluster := range clusters { + if clusterStatus.ClusterName == cluster { + exist = true + currentClusterStatus = append(currentClusterStatus, clusterStatus) + break + } + } + + if !exist { + removedClusterStatus = append(removedClusterStatus, clusterStatus) + } + } + return currentClusterStatus, removedClusterStatus +} + +func progressivePerCluster(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, length int, timeout time.Duration, existingClusterStatus []ClusterRolloutStatus) RolloutResult { + var rolloutClusters, timeoutClusters []ClusterRolloutStatus + existingClusters := make(map[string]bool) - if length == 0 { - return RolloutResult{ - ClustersToRollout: rolloutClusters, - ClustersTimeOut: timeoutClusters, + for _, status := range existingClusterStatus { + if status.ClusterName == "" { + continue + } + + existingClusters[status.ClusterName] = true + rolloutClusters, timeoutClusters = determineRolloutStatus(status, timeout, rolloutClusters, timeoutClusters) + + if len(rolloutClusters) >= length { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + } } } clusters := clusterGroupsMap.GetClusters().UnsortedList() clusterToGroupKey := clusterGroupsMap.ClusterToGroupKey() - // Sort the clusters in alphabetical order to ensure consistency. 
sort.Strings(clusters) for _, cluster := range clusters { - status := statusFunc(cluster) - if groupKey, exists := clusterToGroupKey[cluster]; exists { - status.GroupKey = groupKey + if existingClusters[cluster] { + continue } - newStatus, needToRollout := determineRolloutStatusAndContinue(status, timeout) - status.Status = newStatus.Status - status.TimeOutTime = newStatus.TimeOutTime - - if needToRollout { - rolloutClusters[cluster] = status - } - if status.Status == TimeOut { - timeoutClusters[cluster] = status + status := ClusterRolloutStatus{ + ClusterName: cluster, + Status: ToApply, + GroupKey: clusterToGroupKey[cluster], } + rolloutClusters = append(rolloutClusters, status) - if len(rolloutClusters)%length == 0 && len(rolloutClusters) > 0 { + if len(rolloutClusters) >= length { return RolloutResult{ ClustersToRollout: rolloutClusters, ClustersTimeOut: timeoutClusters, @@ -236,32 +288,44 @@ func progressivePerCluster(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, len } } -func progressivePerGroup(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, timeout time.Duration, statusFunc ClusterRolloutStatusFunc) RolloutResult { - rolloutClusters := map[string]ClusterRolloutStatus{} - timeoutClusters := map[string]ClusterRolloutStatus{} +func progressivePerGroup(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, timeout time.Duration, existingClusterStatus []ClusterRolloutStatus) RolloutResult { + var rolloutClusters, timeoutClusters []ClusterRolloutStatus + existingClusters := make(map[string]bool) - clusterGroupKeys := clusterGroupsMap.GetOrderedGroupKeys() + for _, status := range existingClusterStatus { + if status.ClusterName == "" { + continue + } + + if status.Status == ToApply { + // Set as false to consider the cluster in the decisionGroups iteration. + existingClusters[status.ClusterName] = false + } else { + existingClusters[status.ClusterName] = true + rolloutClusters, timeoutClusters = determineRolloutStatus(status, timeout, rolloutClusters, timeoutClusters) + } + } + clusterGroupKeys := clusterGroupsMap.GetOrderedGroupKeys() for _, key := range clusterGroupKeys { if subclusters, ok := clusterGroupsMap[key]; ok { // Iterate through clusters in the group - for _, cluster := range subclusters.UnsortedList() { - status := statusFunc(cluster) - status.GroupKey = key - - newStatus, needToRollout := determineRolloutStatusAndContinue(status, timeout) - status.Status = newStatus.Status - status.TimeOutTime = newStatus.TimeOutTime - - if needToRollout { - rolloutClusters[cluster] = status + clusters := subclusters.UnsortedList() + sort.Strings(clusters) + for _, cluster := range clusters { + if existingClusters[cluster] { + continue } - if status.Status == TimeOut { - timeoutClusters[cluster] = status + + status := ClusterRolloutStatus{ + ClusterName: cluster, + Status: ToApply, + GroupKey: key, } + rolloutClusters = append(rolloutClusters, status) } - // Return if there are clusters to rollout + // As it is perGroup Return if there are clusters to rollOut if len(rolloutClusters) > 0 { return RolloutResult{ ClustersToRollout: rolloutClusters, @@ -277,36 +341,33 @@ func progressivePerGroup(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, timeo } } -// determineRolloutStatusAndContinue checks whether a cluster should continue its rollout based on -// its current status and timeout. The function returns an updated cluster status and a boolean -// indicating whether the rollout should continue. 
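A small sketch of how the timeout is applied by determineRolloutStatus, defined just below. The cluster name and times are invented, and the snippet assumes it runs inside this package alongside RolloutClock and the status constants.

func exampleTimeoutHandling() {
	// A cluster that has been Progressing for two minutes.
	last := metav1.NewTime(RolloutClock.Now().Add(-2 * time.Minute))
	status := ClusterRolloutStatus{ClusterName: "cluster1", Status: Progressing, LastTransitionTime: &last}

	var rollout, timedOut []ClusterRolloutStatus

	// With a 90s timeout the deadline (LastTransitionTime + 90s) has already passed,
	// so the cluster is appended to the timed-out list with Status set to TimeOut.
	rollout, timedOut = determineRolloutStatus(status, 90*time.Second, rollout, timedOut)

	// With timeout "None" (maxTimeDuration) the deadline is effectively never reached
	// and the same status would be appended to the rollout list instead; with timeout
	// "0s" it would time out immediately.
	_ = rollout
	_ = timedOut
}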
+// determineRolloutStatus checks whether a cluster should continue its rollout based on its current status and timeout.
+// The function updates the cluster status and appends it to the appropriate slice.
 //
-// The timeout parameter is utilized for handling progressing and failed statuses:
-//  1. If timeout is set to None (maxTimeDuration), the function will wait until cluster reaching a success status.
-//     It returns true to include the cluster in the result and halts the rollout of other clusters or groups.
-//  2. If timeout is set to 0, the function proceeds with upgrading other clusters without waiting.
-//     It returns false to skip waiting for the cluster to reach a success status and continues to rollout others.
-func determineRolloutStatusAndContinue(status ClusterRolloutStatus, timeout time.Duration) (*ClusterRolloutStatus, bool) {
-	newStatus := status.DeepCopy()
+// The timeout parameter is utilized for handling progressing and failed statuses and any other unknown status:
+//  1. If timeout is set to None (maxTimeDuration), the function appends the clusterStatus to the rollout clusters.
+//  2. If timeout is set to 0, the function appends the clusterStatus to the timeout clusters.
+func determineRolloutStatus(status ClusterRolloutStatus, timeout time.Duration, rolloutClusters []ClusterRolloutStatus, timeoutClusters []ClusterRolloutStatus) ([]ClusterRolloutStatus, []ClusterRolloutStatus) {
+
 	switch status.Status {
 	case ToApply:
-		return newStatus, true
+		rolloutClusters = append(rolloutClusters, status)
 	case TimeOut, Succeeded, Skip:
-		return newStatus, false
-	case Progressing, Failed:
+		return rolloutClusters, timeoutClusters
+	default: // For progressing, failed status and any other unknown status.
 		timeOutTime := getTimeOutTime(status.LastTransitionTime, timeout)
-		newStatus.TimeOutTime = timeOutTime
+		status.TimeOutTime = timeOutTime
 		// check if current time is before the timeout time
 		if RolloutClock.Now().Before(timeOutTime.Time) {
-			return newStatus, true
+			rolloutClusters = append(rolloutClusters, status)
 		} else {
-			newStatus.Status = TimeOut
-			return newStatus, false
+			status.Status = TimeOut
+			timeoutClusters = append(timeoutClusters, status)
 		}
-	default:
-		return newStatus, true
 	}
+
+	return rolloutClusters, timeoutClusters
 }
 
 // get the timeout time
@@ -320,7 +381,7 @@ func getTimeOutTime(startTime *metav1.Time, timeout time.Duration) *metav1.Time
 	return &metav1.Time{Time: timeoutTime}
 }
 
-func calculateLength(maxConcurrency intstr.IntOrString, total int) (int, error) {
+func calculateRolloutSize(maxConcurrency intstr.IntOrString, total int) (int, error) {
 	length := total
 
 	switch maxConcurrency.Type {
diff --git a/cluster/v1alpha1/helpers_test.go b/cluster/v1alpha1/helpers_test.go
index badcb6b28..396e3bd9e 100644
--- a/cluster/v1alpha1/helpers_test.go
+++ b/cluster/v1alpha1/helpers_test.go
@@ -16,7 +16,6 @@ import (
 )
 
 var fakeTime = metav1.NewTime(time.Date(2022, time.January, 01, 0, 0, 0, 0, time.UTC))
-var fakeTimeMax = metav1.NewTime(fakeTime.Add(maxTimeDuration))
 var fakeTimeMax_60s = metav1.NewTime(fakeTime.Add(maxTimeDuration - time.Minute))
 var fakeTimeMax_120s = metav1.NewTime(fakeTime.Add(maxTimeDuration - 2*time.Minute))
 var fakeTime30s = metav1.NewTime(fakeTime.Add(30 * time.Second))
@@ -28,100 +27,125 @@ type FakePlacementDecisionGetter struct {
 	FakeDecisions []*clusterv1beta1.PlacementDecision
 }
 
+// Dummy workload type that will be used to create a RolloutHandler.
+type dummyWorkload struct { + ClusterGroup clusterv1beta1.GroupKey + ClusterName string + State string + LastTransitionTime *metav1.Time +} + +// Dummy Workload status +const ( + valid = "valid" + applying = "applying" + done = "done" + missing = "missing" +) + +// Dummy ClusterRolloutStatusFunc implementation that will be used to create a RolloutHandler. +func dummyWorkloadClusterRolloutStatusFunc(clusterName string, workload dummyWorkload) (ClusterRolloutStatus, error) { + // workload obj should be used to determine the clusterRolloutStatus. + switch workload.State { + case valid: + return ClusterRolloutStatus{GroupKey: workload.ClusterGroup, ClusterName: clusterName, Status: ToApply, LastTransitionTime: workload.LastTransitionTime}, nil + case applying: + return ClusterRolloutStatus{GroupKey: workload.ClusterGroup, ClusterName: clusterName, Status: Progressing, LastTransitionTime: workload.LastTransitionTime}, nil + case done: + return ClusterRolloutStatus{GroupKey: workload.ClusterGroup, ClusterName: clusterName, Status: Succeeded, LastTransitionTime: workload.LastTransitionTime}, nil + case missing: + return ClusterRolloutStatus{GroupKey: workload.ClusterGroup, ClusterName: clusterName, Status: Failed, LastTransitionTime: workload.LastTransitionTime}, nil + default: + return ClusterRolloutStatus{GroupKey: workload.ClusterGroup, ClusterName: clusterName, Status: ToApply, LastTransitionTime: workload.LastTransitionTime}, nil + } +} + +type testCase struct { + name string + rolloutStrategy RolloutStrategy + existingScheduledClusterGroups map[clusterv1beta1.GroupKey]sets.Set[string] + clusterRolloutStatusFunc ClusterRolloutStatusFunc[dummyWorkload] // Using type dummy workload obj + expectRolloutStrategy *RolloutStrategy + existingWorkloads []dummyWorkload + expectRolloutClusters []ClusterRolloutStatus + expectTimeOutClusters []ClusterRolloutStatus + expectRemovedClusters []ClusterRolloutStatus +} + func (f *FakePlacementDecisionGetter) List(selector labels.Selector, namespace string) (ret []*clusterv1beta1.PlacementDecision, err error) { return f.FakeDecisions, nil } func TestGetRolloutCluster_All(t *testing.T) { - tests := []struct { - name string - rolloutStrategy RolloutStrategy - existingScheduledClusterGroups map[clusterv1beta1.GroupKey]sets.Set[string] - clusterRolloutStatusFunc ClusterRolloutStatusFunc - expectRolloutStrategy *RolloutStrategy - expectRolloutClusters map[string]ClusterRolloutStatus - expectTimeOutClusters map[string]ClusterRolloutStatus - }{ + tests := []testCase{ { - name: "test rollout all with timeout 90s", + name: "test rollout all with timeout 90s witout workload created", rolloutStrategy: RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster2": {Status: Progressing, LastTransitionTime: &fakeTime_60s}, - "cluster3": {Status: Succeeded, LastTransitionTime: &fakeTime_60s}, - "cluster4": {Status: Failed, LastTransitionTime: &fakeTime_60s}, - "cluster5": {Status: Failed, LastTransitionTime: &fakeTime_120s}, - "cluster6": {}, - } - return clustersRolloutStatus[clusterName] - }, - 
expectRolloutStrategy: &RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, - "cluster4": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Failed, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, - "cluster6": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}}, - }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{ - "cluster5": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + existingWorkloads: []dummyWorkload{}, + expectRolloutStrategy: &RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, }, }, { - name: "test rollout all (default timeout None)", - rolloutStrategy: RolloutStrategy{Type: All}, + name: "test rollout all with timeout 90s", + rolloutStrategy: RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply}, - "cluster2": {Status: Progressing}, - "cluster3": {Status: Succeeded}, - "cluster4": {Status: Failed}, - "cluster5": {}, - } - return clustersRolloutStatus[clusterName] - }, - expectRolloutStrategy: &RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{""}}}, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, TimeOutTime: &fakeTimeMax}, - "cluster4": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Failed, TimeOutTime: &fakeTimeMax}, - "cluster5": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}}, + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, - }, - { - name: "test rollout all with timeout 0s", - rolloutStrategy: RolloutStrategy{Type: All, 
All: &RolloutAll{Timeout: Timeout{"0s"}}}, - existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply}, - "cluster2": {Status: Progressing}, - "cluster3": {Status: Succeeded}, - "cluster4": {Status: Failed}, - "cluster5": {}, - } - return clustersRolloutStatus[clusterName] + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster4", + State: missing, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster5", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, }, - expectRolloutStrategy: &RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"0s"}}}, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, - "cluster5": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}}, + expectRolloutStrategy: &RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Failed, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{ - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, TimeOutTime: &fakeTime}, - "cluster4": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: TimeOut, TimeOutTime: &fakeTime}, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, }, }, } @@ -132,8 +156,14 @@ func TestGetRolloutCluster_All(t *testing.T) { fakeGetter := FakePlacementDecisionGetter{} tracker := clusterv1beta1.NewPlacementDecisionClustersTrackerWithGroups(nil, &fakeGetter, test.existingScheduledClusterGroups) - rolloutHandler, _ := NewRolloutHandler(tracker) - actualRolloutStrategy, 
actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, test.clusterRolloutStatusFunc) + rolloutHandler, _ := NewRolloutHandler(tracker, test.clusterRolloutStatusFunc) + existingRolloutClusters := []ClusterRolloutStatus{} + for _, workload := range test.existingWorkloads { + clsRolloutStatus, _ := test.clusterRolloutStatusFunc(workload.ClusterName, workload) + existingRolloutClusters = append(existingRolloutClusters, clsRolloutStatus) + } + + actualRolloutStrategy, actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, existingRolloutClusters) if !reflect.DeepEqual(actualRolloutStrategy.All, test.expectRolloutStrategy.All) { t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect strategy : %v, actual : %v", test.name, test.expectRolloutStrategy, actualRolloutStrategy) @@ -147,136 +177,316 @@ func TestGetRolloutCluster_All(t *testing.T) { t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect timeout clusters: %v, actual : %v", test.name, test.expectTimeOutClusters, actualRolloutResult.ClustersTimeOut) return } + if !reflect.DeepEqual(actualRolloutResult.ClustersRemoved, test.expectRemovedClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect removed clusters: %v, actual : %v", test.name, test.expectRemovedClusters, actualRolloutResult.ClustersRemoved) + return + } } } func TestGetRolloutCluster_Progressive(t *testing.T) { - tests := []struct { - name string - rolloutStrategy RolloutStrategy - existingScheduledClusterGroups map[clusterv1beta1.GroupKey]sets.Set[string] - clusterRolloutStatusFunc ClusterRolloutStatusFunc - expectRolloutStrategy *RolloutStrategy - expectRolloutClusters map[string]ClusterRolloutStatus - expectTimeOutClusters map[string]ClusterRolloutStatus - }{ + tests := []testCase{ { - name: "test progressive rollout with timeout 90s", + name: "test progressive rollout with timeout 90s witout workload created", rolloutStrategy: RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ - Timeout: Timeout{"90s"}, + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster2": {Status: Progressing, LastTransitionTime: &fakeTime_60s}, - "cluster3": {Status: Succeeded, LastTransitionTime: &fakeTime_60s}, - "cluster4": {Status: Failed, LastTransitionTime: &fakeTime_60s}, - "cluster5": {Status: Failed, LastTransitionTime: &fakeTime_120s}, - "cluster6": {}, - } - return clustersRolloutStatus[clusterName] + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), + }, + }, + existingWorkloads: []dummyWorkload{}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: 
ToApply}, + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + }, + }, + { + name: "test progressive rollout with timeout 90s and workload clusterRollOutStatus are in ToApply status", + rolloutStrategy: RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + MaxConcurrency: intstr.FromInt(4), + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster7", "cluster8", "cluster9"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + MaxConcurrency: intstr.FromInt(4), + Timeout: Timeout{"90s"}, + }, }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster4", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster5", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster6", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster7", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster8", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster9", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster10", + State: valid, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + }, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster10", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, + }, + }, + { + name: "test progressive rollout with timeout 90s and MaxConcurrency not set", + rolloutStrategy: RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster7", "cluster8", "cluster9"), + }, + 
clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ Timeout: Timeout{"90s"}, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, - "cluster4": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Failed, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, - "cluster6": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}}, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster4", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster5", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster6", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster7", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster8", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster9", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster10", + State: valid, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster7", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, + {ClusterName: "cluster8", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, + {ClusterName: "cluster9", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{ - "cluster5": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster10", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, }, }, { - name: "test progressive rollout with timeout None and MaxConcurrency 50%", + name: "test progressive rollout 
with timeout 90s", rolloutStrategy: RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ - Timeout: Timeout{""}, - MaxConcurrency: intstr.FromString("50%"), // 50% of total clusters + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply}, - "cluster2": {Status: Progressing}, - "cluster3": {Status: Succeeded}, - "cluster4": {Status: Failed}, - "cluster5": {}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ - Timeout: Timeout{""}, - MaxConcurrency: intstr.FromString("50%"), // 50% of total clusters + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_60s, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, TimeOutTime: &fakeTimeMax}, - "cluster4": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Failed, TimeOutTime: &fakeTimeMax}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, { - name: "test progressive rollout with timeout 0s and MaxConcurrency 3", + name: "test progressive rollout with timeout 0s", rolloutStrategy: RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ Timeout: Timeout{"0s"}, - MaxConcurrency: intstr.FromInt(3), // Maximum 3 clusters concurrently + MaxConcurrency: intstr.FromInt(2), }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: 
sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply}, - "cluster2": {Status: Progressing}, - "cluster3": {Status: Succeeded}, - "cluster4": {Status: Failed}, - "cluster5": {}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ Timeout: Timeout{"0s"}, - MaxConcurrency: intstr.FromInt(3), // Maximum 3 clusters concurrently + MaxConcurrency: intstr.FromInt(2), + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_30s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: applying, + LastTransitionTime: &fakeTime_30s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_30s, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, - "cluster5": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{ - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, TimeOutTime: &fakeTime}, - "cluster4": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: TimeOut, TimeOutTime: &fakeTime}, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_30s, TimeOutTime: &fakeTime_30s}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_30s, TimeOutTime: &fakeTime_30s}, }, }, { - name: "test progressive rollout with mandatory decision groups", + name: "test progressive rollout with mandatroyDecisionGroup and timeout 90s ", rolloutStrategy: RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ @@ -285,23 +495,15 @@ func TestGetRolloutCluster_Progressive(t *testing.T) { {GroupName: "group1"}, }, }, - MaxConcurrency: intstr.FromString("50%"), + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(3), }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply}, - 
"cluster2": {Status: ToApply}, - "cluster3": {Status: ToApply}, - "cluster4": {Status: ToApply}, - "cluster5": {Status: ToApply}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ @@ -310,18 +512,86 @@ func TestGetRolloutCluster_Progressive(t *testing.T) { {GroupName: "group1"}, }, }, - MaxConcurrency: intstr.FromString("50%"), - Timeout: Timeout{""}, + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(3), + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_60s, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, { - name: "test progressive rollout with mandatory decision groups Succeed", + name: "test progressive rollout with timeout None and MaxConcurrency 50%", + rolloutStrategy: RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + Timeout: Timeout{"None"}, + MaxConcurrency: intstr.FromString("50%"), // 50% of total clusters + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + Timeout: Timeout{"None"}, + MaxConcurrency: intstr.FromString("50%"), // 50% of total clusters + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + }, + 
expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTimeMax_120s}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTimeMax_60s}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + }, + }, + { + name: "test progressive rollout with mandatory decision groups failed", rolloutStrategy: RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ @@ -330,24 +600,16 @@ func TestGetRolloutCluster_Progressive(t *testing.T) { {GroupName: "group1"}, }, }, - MaxConcurrency: intstr.FromInt(2), + MaxConcurrency: intstr.FromInt(3), + Timeout: Timeout{"90s"}, }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: Succeeded}, - "cluster2": {Status: Succeeded}, - "cluster3": {Status: ToApply}, - "cluster4": {Status: Succeeded}, - "cluster5": {Status: ToApply}, - "cluster6": {Status: ToApply}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ @@ -356,18 +618,33 @@ func TestGetRolloutCluster_Progressive(t *testing.T) { {GroupName: "group1"}, }, }, - MaxConcurrency: intstr.FromInt(2), - Timeout: Timeout{""}, + MaxConcurrency: intstr.FromInt(3), + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: missing, + LastTransitionTime: &fakeTime_120s, }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster3": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, - "cluster5": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, { - name: "test progressive rollout with mandatory decision groups failed", + name: "test progressive rollout with mandatory decision groups Succeed", rolloutStrategy: RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ @@ -376,24 +653,15 @@ func 
TestGetRolloutCluster_Progressive(t *testing.T) { {GroupName: "group1"}, }, }, - MaxConcurrency: intstr.FromString("50%"), // 50% of total clusters - Timeout: Timeout{"0s"}, + MaxConcurrency: intstr.FromInt(3), }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: Failed}, - "cluster2": {Status: Failed}, - "cluster3": {Status: ToApply}, - "cluster4": {Status: ToApply}, - "cluster5": {Status: ToApply}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: Progressive, Progressive: &RolloutProgressive{ @@ -402,15 +670,29 @@ func TestGetRolloutCluster_Progressive(t *testing.T) { {GroupName: "group1"}, }, }, - MaxConcurrency: intstr.FromString("50%"), // 50% of total clusters - Timeout: Timeout{"0s"}, + MaxConcurrency: intstr.FromInt(3), + Timeout: Timeout{""}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: done, + LastTransitionTime: &fakeTime_120s, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, TimeOutTime: &fakeTimeMax}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, TimeOutTime: &fakeTimeMax}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 2}, Status: ToApply}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, } @@ -422,8 +704,14 @@ func TestGetRolloutCluster_Progressive(t *testing.T) { fakeGetter := FakePlacementDecisionGetter{} tracker := clusterv1beta1.NewPlacementDecisionClustersTrackerWithGroups(nil, &fakeGetter, test.existingScheduledClusterGroups) - rolloutHandler, _ := NewRolloutHandler(tracker) - actualRolloutStrategy, actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, test.clusterRolloutStatusFunc) + rolloutHandler, _ := NewRolloutHandler(tracker, test.clusterRolloutStatusFunc) + existingRolloutClusters := []ClusterRolloutStatus{} + for _, workload := range test.existingWorkloads { + clsRolloutStatus, _ := test.clusterRolloutStatusFunc(workload.ClusterName, workload) + existingRolloutClusters = append(existingRolloutClusters, clsRolloutStatus) + } + + actualRolloutStrategy, actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, existingRolloutClusters) if !reflect.DeepEqual(actualRolloutStrategy.Progressive, 
test.expectRolloutStrategy.Progressive) { t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect strategy : %v, actual : %v", test.name, test.expectRolloutStrategy, actualRolloutStrategy) @@ -437,21 +725,17 @@ func TestGetRolloutCluster_Progressive(t *testing.T) { t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect timeout clusters: %v, actual : %v", test.name, test.expectTimeOutClusters, actualRolloutResult.ClustersTimeOut) return } + if !reflect.DeepEqual(actualRolloutResult.ClustersRemoved, test.expectRemovedClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect removed clusters: %v, actual : %v", test.name, test.expectRemovedClusters, actualRolloutResult.ClustersRemoved) + return + } } } func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) { - tests := []struct { - name string - rolloutStrategy RolloutStrategy - existingScheduledClusterGroups map[clusterv1beta1.GroupKey]sets.Set[string] - clusterRolloutStatusFunc ClusterRolloutStatusFunc - expectRolloutStrategy *RolloutStrategy - expectRolloutClusters map[string]ClusterRolloutStatus - expectTimeOutClusters map[string]ClusterRolloutStatus - }{ + tests := []testCase{ { - name: "test progressive per group rollout with timeout 90s", + name: "test progressivePerGroup rollout with timeout 90s witout workload created", rolloutStrategy: RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ @@ -459,191 +743,289 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) { }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: Failed, LastTransitionTime: &fakeTime_60s}, - "cluster2": {Status: Failed, LastTransitionTime: &fakeTime_120s}, - "cluster3": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster4": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster5": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster6": {}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ Timeout: Timeout{"90s"}, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, - }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{ - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + existingWorkloads: []dummyWorkload{}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster3", GroupKey: 
clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, }, }, { - name: "test progressive per group rollout with timeout None", + name: "test progressivePerGroup rollout with timeout 90s and all workload clusterRollOutStatus are in ToApply status", rolloutStrategy: RolloutStrategy{ Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"90s"}, + }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: Failed, LastTransitionTime: &fakeTime_60s}, - "cluster2": {Status: Failed, LastTransitionTime: &fakeTime_120s}, - "cluster3": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster4": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster5": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster6": {}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ - Timeout: Timeout{""}, + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster4", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster5", + State: valid, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster6", + State: valid, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTimeMax_60s}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTimeMax_120s}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, { - name: "test progressive per group rollout with timeout 0s", + name: "test progressivePerGroup rollout with timeout 90s", rolloutStrategy: RolloutStrategy{ Type: 
ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ - Timeout: Timeout{"0s"}, + Timeout: Timeout{"90s"}, }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: Failed, LastTransitionTime: &fakeTime_60s}, - "cluster2": {Status: Failed, LastTransitionTime: &fakeTime_120s}, - "cluster3": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster4": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster5": {Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster6": {}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ - Timeout: Timeout{"0s"}, + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_60s, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster3": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster4": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster5": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply, LastTransitionTime: &fakeTime_60s}, - "cluster6": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime_60s}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_120s}, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, }, }, { - name: "test progressive per group rollout with mandatory decision groups", + name: "test progressivePerGroup rollout with timeout 90s and first group timeOut", rolloutStrategy: RolloutStrategy{ Type: ProgressivePerGroup, 
ProgressivePerGroup: &RolloutProgressivePerGroup{ - MandatoryDecisionGroups: MandatoryDecisionGroups{ - MandatoryDecisionGroups: []MandatoryDecisionGroup{ - {GroupName: "group1"}, - }, - }, + Timeout: Timeout{"90s"}, }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: ToApply}, - "cluster2": {Status: ToApply}, - "cluster3": {Status: ToApply}, - "cluster4": {Status: ToApply}, - "cluster5": {Status: ToApply}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster7", "cluster8", "cluster9"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ - MandatoryDecisionGroups: MandatoryDecisionGroups{ - MandatoryDecisionGroups: []MandatoryDecisionGroup{ - {GroupName: "group1"}, - }, - }, + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: done, + LastTransitionTime: &fakeTime_60s, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, { - name: "test progressive per group rollout with mandatory decision groups Succeed", + name: "test progressivePerGroup rollout with timeout 90s and first group timeOut, second group succeeded", rolloutStrategy: RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ - MandatoryDecisionGroups: MandatoryDecisionGroups{ - MandatoryDecisionGroups: []MandatoryDecisionGroup{ - {GroupName: "group1"}, - }, - }, + Timeout: Timeout{"90s"}, }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), -
{GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4"), - {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: Succeeded}, - "cluster2": {Status: Succeeded}, - "cluster3": {Status: ToApply}, - "cluster4": {Status: Succeeded}, - "cluster5": {Status: ToApply}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster7", "cluster8", "cluster9"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ - MandatoryDecisionGroups: MandatoryDecisionGroups{ - MandatoryDecisionGroups: []MandatoryDecisionGroup{ - {GroupName: "group1"}, - }, - }, + Timeout: Timeout{"90s"}, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster3": {GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster4", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster5", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster6", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster7", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, + {ClusterName: "cluster8", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, + {ClusterName: "cluster9", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 2}, Status: ToApply}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, { - name: "test progressive per group rollout with mandatory decision groups failed", + name: "test progressivePerGroup rollout with timeout None and first group failing", + rolloutStrategy: RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"None"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster7", 
"cluster8", "cluster9"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"None"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTimeMax_120s}, + }, + }, + { + name: "test ProgressivePerGroup rollout with mandatroyDecisionGroup failing and timeout 90s ", rolloutStrategy: RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ @@ -652,23 +1034,14 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) { {GroupName: "group1"}, }, }, - Timeout: Timeout{"0s"}, + Timeout: Timeout{"90s"}, }, }, existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ - {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2"), - {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5"), - }, - clusterRolloutStatusFunc: func(clusterName string) ClusterRolloutStatus { - clustersRolloutStatus := map[string]ClusterRolloutStatus{ - "cluster1": {Status: Failed}, - "cluster2": {Status: Failed}, - "cluster3": {Status: ToApply}, - "cluster4": {Status: ToApply}, - "cluster5": {Status: ToApply}, - } - return clustersRolloutStatus[clusterName] + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, expectRolloutStrategy: &RolloutStrategy{ Type: ProgressivePerGroup, ProgressivePerGroup: &RolloutProgressivePerGroup{ @@ -677,14 +1050,89 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) { {GroupName: "group1"}, }, }, - Timeout: Timeout{"0s"}, + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: missing, + LastTransitionTime: &fakeTime_120s, }, }, - expectRolloutClusters: map[string]ClusterRolloutStatus{ - "cluster1": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, TimeOutTime: &fakeTimeMax}, - "cluster2": {GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, TimeOutTime: &fakeTimeMax}, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: 
clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + }, + }, + { + name: "test ProgressivePerGroup rollout with mandatroyDecisionGroup Succeeded and timeout 90s ", + rolloutStrategy: RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ + {GroupName: "group1"}, + }, + }, + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster7", "cluster8", "cluster9"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ + {GroupName: "group1"}, + }, + }, + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, }, - expectTimeOutClusters: map[string]ClusterRolloutStatus{}, }, } @@ -696,8 +1144,14 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) { fakeGetter := FakePlacementDecisionGetter{} tracker := clusterv1beta1.NewPlacementDecisionClustersTrackerWithGroups(nil, &fakeGetter, test.existingScheduledClusterGroups) - rolloutHandler, _ := NewRolloutHandler(tracker) - actualRolloutStrategy, actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, test.clusterRolloutStatusFunc) + rolloutHandler, _ := NewRolloutHandler(tracker, test.clusterRolloutStatusFunc) + existingRolloutClusters := []ClusterRolloutStatus{} + for _, workload := range test.existingWorkloads { + clsRolloutStatus, _ := test.clusterRolloutStatusFunc(workload.ClusterName, workload) + existingRolloutClusters = append(existingRolloutClusters, clsRolloutStatus) + } + + actualRolloutStrategy, actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, existingRolloutClusters) if !reflect.DeepEqual(actualRolloutStrategy.ProgressivePerGroup, test.expectRolloutStrategy.ProgressivePerGroup) { t.Errorf("Case: %v, Failed to run NewRolloutHandler. 
Expect strategy : %v, actual : %v", test.name, test.expectRolloutStrategy, actualRolloutStrategy) @@ -711,90 +1165,658 @@ func TestGetRolloutCluster_ProgressivePerGroup(t *testing.T) { t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect timeout clusters: %v, actual : %v", test.name, test.expectTimeOutClusters, actualRolloutResult.ClustersTimeOut) return } + if !reflect.DeepEqual(actualRolloutResult.ClustersRemoved, test.expectRemovedClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect removed clusters: %v, actual : %v", test.name, test.expectRemovedClusters, actualRolloutResult.ClustersRemoved) + return + } + } +} + +func TestGetRolloutCluster_ClusterAdded(t *testing.T) { + tests := []testCase{ + { + name: "test rollout all with timeout 90s and cluster added", + rolloutStrategy: RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster7"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster5", "cluster6"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster4", + State: missing, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster5", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutStrategy: &RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Failed, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster7", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + }, + }, + { + name: "test progressive rollout with mandatory decision groups Succeed and clusters added after rollout", + rolloutStrategy: RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ 
+ {GroupName: "group1"}, + }, + }, + MaxConcurrency: intstr.FromInt(3), + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster7"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster4", "cluster8"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster5", "cluster6", "cluster9"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ + {GroupName: "group1"}, + }, + }, + MaxConcurrency: intstr.FromInt(3), + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster3", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster4", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 2}, + ClusterName: "cluster5", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + //{ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 2}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster7", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + }, + }, + { + name: "test progressivePerGroup rollout with timeout 90s and cluster added after rollout start.", + rolloutStrategy: RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + 
ClusterName: "cluster4", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupIndex: 1}, + ClusterName: "cluster5", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: ToApply}, + }, + }, + } + + // Set the fake time for testing + RolloutClock = testingclock.NewFakeClock(fakeTime.Time) + + for _, test := range tests { + // Init fake placement decision tracker + fakeGetter := FakePlacementDecisionGetter{} + tracker := clusterv1beta1.NewPlacementDecisionClustersTrackerWithGroups(nil, &fakeGetter, test.existingScheduledClusterGroups) + + rolloutHandler, _ := NewRolloutHandler(tracker, test.clusterRolloutStatusFunc) + existingRolloutClusters := []ClusterRolloutStatus{} + for _, workload := range test.existingWorkloads { + clsRolloutStatus, _ := test.clusterRolloutStatusFunc(workload.ClusterName, workload) + existingRolloutClusters = append(existingRolloutClusters, clsRolloutStatus) + } + + actualRolloutStrategy, actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, existingRolloutClusters) + + if !reflect.DeepEqual(actualRolloutStrategy.Type, test.expectRolloutStrategy.Type) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect strategy : %v, actual : %v", test.name, test.expectRolloutStrategy, actualRolloutStrategy) + return + } + if !reflect.DeepEqual(actualRolloutResult.ClustersToRollout, test.expectRolloutClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect rollout clusters: %v, actual : %v", test.name, test.expectRolloutClusters, actualRolloutResult.ClustersToRollout) + return + } + if !reflect.DeepEqual(actualRolloutResult.ClustersTimeOut, test.expectTimeOutClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect timeout clusters: %v, actual : %v", test.name, test.expectTimeOutClusters, actualRolloutResult.ClustersTimeOut) + return + } + if !reflect.DeepEqual(actualRolloutResult.ClustersRemoved, test.expectRemovedClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. 
Expect removed clusters: %v, actual : %v", test.name, test.expectRemovedClusters, actualRolloutResult.ClustersRemoved) + return + } + } +} + +func TestGetRolloutCluster_ClusterRemoved(t *testing.T) { + tests := []testCase{ + { + name: "test rollout all with timeout 90s and clusters removed", + rolloutStrategy: RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster3", "cluster5"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster4", + State: missing, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, + ClusterName: "cluster5", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutStrategy: &RolloutStrategy{Type: All, All: &RolloutAll{Timeout: Timeout{"90s"}}}, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + }, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: Failed, LastTransitionTime: &fakeTime_60s}, + }, + }, + { + name: "test progressive rollout with timeout 90s and cluster removed", + rolloutStrategy: RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster2", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: 
&fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupName: "", GroupIndex: 1}, Status: ToApply}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + }, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster1", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Succeeded, LastTransitionTime: &fakeTime_60s}, + }, + }, + { + name: "test progressive rollout with mandatoryDecisionGroup, timeout 90s and cluster removed from mandatoryDecisionGroup", + rolloutStrategy: RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ + {GroupName: "group1"}, + }, + }, + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: Progressive, + Progressive: &RolloutProgressive{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ + {GroupName: "group1"}, + }, + }, + Timeout: Timeout{"90s"}, + MaxConcurrency: intstr.FromInt(2), + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + }, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s}, + }, + }, + { + name: "test progressivePerGroup rollout with timeout 90s and cluster removed after rollout start.", + rolloutStrategy: RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4",
"cluster5", "cluster6"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Progressing, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime30s}, + }, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_60s}, + }, + }, + { + name: "test progressivePerGroup rollout with timeout 90s and cluster removed after rollout start while the group times out.", + rolloutStrategy: RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: applying, + LastTransitionTime: &fakeTime_120s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + }, + expectTimeOutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster3", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: TimeOut, LastTransitionTime: &fakeTime_120s, TimeOutTime: &fakeTime_30s}, + }, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_60s}, + }, + }, + { + name: "test ProgressivePerGroup rollout with mandatoryDecisionGroup, timeout 90s and cluster removed from mandatoryDecisionGroup", + rolloutStrategy: RolloutStrategy{ + Type: ProgressivePerGroup,
ProgressivePerGroup: &RolloutProgressivePerGroup{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ + {GroupName: "group1"}, + }, + }, + Timeout: Timeout{"90s"}, + }, + }, + existingScheduledClusterGroups: map[clusterv1beta1.GroupKey]sets.Set[string]{ + {GroupName: "group1", GroupIndex: 0}: sets.New[string]("cluster1", "cluster3"), + {GroupName: "", GroupIndex: 1}: sets.New[string]("cluster4", "cluster5", "cluster6"), + {GroupName: "", GroupIndex: 2}: sets.New[string]("cluster7", "cluster8", "cluster9"), + }, + clusterRolloutStatusFunc: dummyWorkloadClusterRolloutStatusFunc, + expectRolloutStrategy: &RolloutStrategy{ + Type: ProgressivePerGroup, + ProgressivePerGroup: &RolloutProgressivePerGroup{ + MandatoryDecisionGroups: MandatoryDecisionGroups{ + MandatoryDecisionGroups: []MandatoryDecisionGroup{ + {GroupName: "group1"}, + }, + }, + Timeout: Timeout{"90s"}, + }, + }, + existingWorkloads: []dummyWorkload{ + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster1", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster2", + State: missing, + LastTransitionTime: &fakeTime_120s, + }, + { + ClusterGroup: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, + ClusterName: "cluster3", + State: done, + LastTransitionTime: &fakeTime_60s, + }, + }, + expectRolloutClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster4", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster5", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + {ClusterName: "cluster6", GroupKey: clusterv1beta1.GroupKey{GroupIndex: 1}, Status: ToApply}, + }, + expectRemovedClusters: []ClusterRolloutStatus{ + {ClusterName: "cluster2", GroupKey: clusterv1beta1.GroupKey{GroupName: "group1", GroupIndex: 0}, Status: Failed, LastTransitionTime: &fakeTime_120s}, + }, + }, + } + + // Set the fake time for testing + RolloutClock = testingclock.NewFakeClock(fakeTime.Time) + + for _, test := range tests { + // Init fake placement decision tracker + fakeGetter := FakePlacementDecisionGetter{} + tracker := clusterv1beta1.NewPlacementDecisionClustersTrackerWithGroups(nil, &fakeGetter, test.existingScheduledClusterGroups) + + rolloutHandler, _ := NewRolloutHandler(tracker, test.clusterRolloutStatusFunc) + existingRolloutClusters := []ClusterRolloutStatus{} + for _, workload := range test.existingWorkloads { + clsRolloutStatus, _ := test.clusterRolloutStatusFunc(workload.ClusterName, workload) + existingRolloutClusters = append(existingRolloutClusters, clsRolloutStatus) + } + + actualRolloutStrategy, actualRolloutResult, _ := rolloutHandler.GetRolloutCluster(test.rolloutStrategy, existingRolloutClusters) + + if !reflect.DeepEqual(actualRolloutStrategy.Type, test.expectRolloutStrategy.Type) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect strategy : %v, actual : %v", test.name, test.expectRolloutStrategy, actualRolloutStrategy) + return + } + if !reflect.DeepEqual(actualRolloutResult.ClustersToRollout, test.expectRolloutClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. 
Expect rollout clusters: %v, actual : %v", test.name, test.expectRolloutClusters, actualRolloutResult.ClustersToRollout) + return + } + if !reflect.DeepEqual(actualRolloutResult.ClustersTimeOut, test.expectTimeOutClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect timeout clusters: %v, actual : %v", test.name, test.expectTimeOutClusters, actualRolloutResult.ClustersTimeOut) + return + } + if !reflect.DeepEqual(actualRolloutResult.ClustersRemoved, test.expectRemovedClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect removed clusters: %v, actual : %v", test.name, test.expectRemovedClusters, actualRolloutResult.ClustersRemoved) + return + } } + } -func TestNeedToUpdate(t *testing.T) { +func TestDetermineRolloutStatus(t *testing.T) { testCases := []struct { - name string - status RolloutStatus - lastTransition *metav1.Time - timeout time.Duration - expectedResult bool + name string + timeout time.Duration + clusterStatus ClusterRolloutStatus + expectRolloutClusters []ClusterRolloutStatus + expectTimeOutClusters []ClusterRolloutStatus }{ { - name: "ToApply status", - status: ToApply, - lastTransition: nil, - timeout: time.Minute, - expectedResult: true, + name: "ToApply status", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: ToApply}, + timeout: time.Minute, + expectRolloutClusters: []ClusterRolloutStatus{{ClusterName: "cluster1", Status: ToApply}}, + }, + { + name: "Skip status", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: Skip}, + timeout: time.Minute, }, { - name: "Progressing status", - status: Progressing, - lastTransition: nil, - timeout: time.Minute, - expectedResult: true, + name: "Succeeded status", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: Succeeded}, + timeout: time.Minute, }, { - name: "Succeeded status", - status: Succeeded, - lastTransition: nil, - timeout: time.Minute, - expectedResult: false, + name: "TimeOut status", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: TimeOut}, + timeout: time.Minute, }, { - name: "Failed status, timeout is None", - status: Failed, - lastTransition: &fakeTime, - timeout: maxTimeDuration, - expectedResult: true, + name: "Progressing status within the timeout duration", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: Progressing, LastTransitionTime: &fakeTime_30s}, + timeout: time.Minute, + expectRolloutClusters: []ClusterRolloutStatus{{ClusterName: "cluster1", Status: Progressing, LastTransitionTime: &fakeTime_30s, TimeOutTime: &fakeTime30s}}, }, { - name: "Failed status, timeout is 0", - status: Failed, - lastTransition: &fakeTime, - timeout: 0, - expectedResult: false, + name: "Failed status out the timeout duration", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: Failed, LastTransitionTime: &fakeTime_60s}, + timeout: time.Minute, + expectTimeOutClusters: []ClusterRolloutStatus{{ClusterName: "cluster1", Status: TimeOut, LastTransitionTime: &fakeTime_60s, TimeOutTime: &fakeTime}}, }, { - name: "Failed status, within the timeout duration", - status: Failed, - lastTransition: &fakeTime_60s, - timeout: 2 * time.Minute, - expectedResult: true, + name: "unknown status out the timeout duration", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: 8, LastTransitionTime: &fakeTime_60s}, + timeout: time.Minute, + expectTimeOutClusters: []ClusterRolloutStatus{{ClusterName: "cluster1", Status: TimeOut, LastTransitionTime: &fakeTime_60s, TimeOutTime: 
&fakeTime}}, }, { - name: "Failed status, outside the timeout duration", - status: Failed, - lastTransition: &fakeTime_120s, - timeout: time.Minute, - expectedResult: false, + name: "unknown status within the timeout duration", + clusterStatus: ClusterRolloutStatus{ClusterName: "cluster1", Status: 9, LastTransitionTime: &fakeTime_30s}, + timeout: time.Minute, + expectRolloutClusters: []ClusterRolloutStatus{{ClusterName: "cluster1", Status: 9, LastTransitionTime: &fakeTime_30s, TimeOutTime: &fakeTime30s}}, }, } RolloutClock = testingclock.NewFakeClock(fakeTime.Time) - // Run the tests for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create a ClusterRolloutStatus instance - status := ClusterRolloutStatus{ - Status: tc.status, - LastTransitionTime: tc.lastTransition, - } - - // Call the determineRolloutStatusAndContinue function - _, result := determineRolloutStatusAndContinue(status, tc.timeout) - - // Compare the result with the expected result - if result != tc.expectedResult { - t.Errorf("Expected result: %v, got: %v", tc.expectedResult, result) - } - }) + var rolloutClusters, timeoutClusters []ClusterRolloutStatus + rolloutClusters, timeoutClusters = determineRolloutStatus(tc.clusterStatus, tc.timeout, rolloutClusters, timeoutClusters) + if !reflect.DeepEqual(rolloutClusters, tc.expectRolloutClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect rollout clusters: %v, actual : %v", tc.name, tc.expectRolloutClusters, rolloutClusters) + return + } + if !reflect.DeepEqual(timeoutClusters, tc.expectTimeOutClusters) { + t.Errorf("Case: %v, Failed to run NewRolloutHandler. Expect timeout clusters: %v, actual : %v", tc.name, tc.expectTimeOutClusters, timeoutClusters) + return + } } } -func TestCalculateLength(t *testing.T) { +func TestCalculateRolloutSize(t *testing.T) { total := 100 tests := []struct { @@ -813,7 +1835,7 @@ func TestCalculateLength(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - length, err := calculateLength(test.maxConcurrency, total) + length, err := calculateRolloutSize(test.maxConcurrency, total) // Compare the result with the expected result if length != test.expected { diff --git a/cluster/v1alpha1/zz_generated.deepcopy.go b/cluster/v1alpha1/zz_generated.deepcopy.go index a72d4c816..2be1d2833 100644 --- a/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/cluster/v1alpha1/zz_generated.deepcopy.go @@ -1,13 +1,13 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -16,7 +16,6 @@ func (in *AddOnPlacementScore) DeepCopyInto(out *AddOnPlacementScore) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScore. @@ -40,7 +39,6 @@ func (in *AddOnPlacementScore) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AddOnPlacementScoreItem) DeepCopyInto(out *AddOnPlacementScoreItem) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScoreItem. @@ -65,7 +63,6 @@ func (in *AddOnPlacementScoreList) DeepCopyInto(out *AddOnPlacementScoreList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScoreList. @@ -105,7 +102,6 @@ func (in *AddOnPlacementScoreStatus) DeepCopyInto(out *AddOnPlacementScoreStatus in, out := &in.ValidUntil, &out.ValidUntil *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScoreStatus. @@ -124,7 +120,6 @@ func (in *ClusterClaim) DeepCopyInto(out *ClusterClaim) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaim. @@ -157,7 +152,6 @@ func (in *ClusterClaimList) DeepCopyInto(out *ClusterClaimList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimList. @@ -181,7 +175,6 @@ func (in *ClusterClaimList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClaimSpec) DeepCopyInto(out *ClusterClaimSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimSpec. @@ -206,7 +199,6 @@ func (in *ClusterRolloutStatus) DeepCopyInto(out *ClusterRolloutStatus) { in, out := &in.TimeOutTime, &out.TimeOutTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRolloutStatus. @@ -222,7 +214,6 @@ func (in *ClusterRolloutStatus) DeepCopy() *ClusterRolloutStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MandatoryDecisionGroup) DeepCopyInto(out *MandatoryDecisionGroup) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MandatoryDecisionGroup. @@ -243,7 +234,6 @@ func (in *MandatoryDecisionGroups) DeepCopyInto(out *MandatoryDecisionGroups) { *out = make([]MandatoryDecisionGroup, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MandatoryDecisionGroups. @@ -260,7 +250,6 @@ func (in *MandatoryDecisionGroups) DeepCopy() *MandatoryDecisionGroups { func (in *RolloutAll) DeepCopyInto(out *RolloutAll) { *out = *in out.Timeout = in.Timeout - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAll. @@ -279,7 +268,6 @@ func (in *RolloutProgressive) DeepCopyInto(out *RolloutProgressive) { in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) out.MaxConcurrency = in.MaxConcurrency out.Timeout = in.Timeout - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressive. 
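The deepcopy changes in this file follow the reworked cluster/v1alpha1 rollout helpers exercised by the tests above: the handler now takes the per-cluster status callback at construction time, GetRolloutCluster receives the statuses of workloads that already exist, and RolloutResult carries ClustersToRollout, ClustersTimeOut and the new ClustersRemoved as slices. A minimal sketch of that flow follows; the manifestWork type, the example strategy values and the print statements are assumptions made for illustration, and the generic ClusterRolloutStatusFunc signature is inferred from the test harness rather than quoted from the library.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

// manifestWork stands in for whatever per-cluster resource a controller manages;
// it is an assumption for this sketch, not a type from the library.
type manifestWork struct {
	Applied     bool
	LastApplied *metav1.Time
}

// rolloutNextBatch sketches one reconcile pass. The tracker is the value returned by
// clusterv1beta1.NewPlacementDecisionClustersTrackerWithGroups (type name assumed),
// and existing holds the statuses of workloads already created, built the same way
// the tests above build existingRolloutClusters.
func rolloutNextBatch(tracker *clusterv1beta1.PlacementDecisionClustersTracker, existing []clusterv1alpha1.ClusterRolloutStatus) error {
	// Map a workload to a ClusterRolloutStatus; the handler now receives this
	// callback at construction time instead of at GetRolloutCluster time.
	var statusFn clusterv1alpha1.ClusterRolloutStatusFunc[manifestWork] = func(clusterName string, w manifestWork) (clusterv1alpha1.ClusterRolloutStatus, error) {
		status := clusterv1alpha1.ToApply
		if w.Applied {
			status = clusterv1alpha1.Succeeded
		}
		return clusterv1alpha1.ClusterRolloutStatus{
			ClusterName:        clusterName,
			Status:             status,
			LastTransitionTime: w.LastApplied,
		}, nil
	}

	handler, err := clusterv1alpha1.NewRolloutHandler(tracker, statusFn)
	if err != nil {
		return err
	}

	// Roll out to at most three clusters at a time; each cluster times out after 90s.
	strategy := clusterv1alpha1.RolloutStrategy{
		Type: clusterv1alpha1.Progressive,
		Progressive: &clusterv1alpha1.RolloutProgressive{
			MaxConcurrency: intstr.FromInt(3),
			Timeout:        clusterv1alpha1.Timeout{"90s"},
		},
	}

	_, result, err := handler.GetRolloutCluster(strategy, existing)
	if err != nil {
		return err
	}
	for _, c := range result.ClustersToRollout {
		fmt.Printf("apply workload to %s (group %v)\n", c.ClusterName, c.GroupKey)
	}
	for _, c := range result.ClustersTimeOut {
		fmt.Printf("workload on %s timed out at %v\n", c.ClusterName, c.TimeOutTime)
	}
	for _, c := range result.ClustersRemoved {
		fmt.Printf("clean up workload on removed cluster %s\n", c.ClusterName)
	}
	return nil
}

Passing the existing statuses explicitly, rather than a name-only callback, appears to be what lets the handler report ClustersRemoved for clusters that still have workloads but no longer appear in any decision group, as the ClusterRemoved test cases above expect.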
@@ -297,7 +285,6 @@ func (in *RolloutProgressivePerGroup) DeepCopyInto(out *RolloutProgressivePerGro *out = *in in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) out.Timeout = in.Timeout - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressivePerGroup. @@ -315,19 +302,25 @@ func (in *RolloutResult) DeepCopyInto(out *RolloutResult) { *out = *in if in.ClustersToRollout != nil { in, out := &in.ClustersToRollout, &out.ClustersToRollout - *out = make(map[string]ClusterRolloutStatus, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]ClusterRolloutStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.ClustersTimeOut != nil { in, out := &in.ClustersTimeOut, &out.ClustersTimeOut - *out = make(map[string]ClusterRolloutStatus, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]ClusterRolloutStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClustersRemoved != nil { + in, out := &in.ClustersRemoved, &out.ClustersRemoved + *out = make([]ClusterRolloutStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutResult. @@ -358,7 +351,6 @@ func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) { *out = new(RolloutProgressivePerGroup) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStrategy. @@ -374,7 +366,6 @@ func (in *RolloutStrategy) DeepCopy() *RolloutStrategy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Timeout) DeepCopyInto(out *Timeout) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timeout. diff --git a/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml new file mode 100644 index 000000000..5ceb4eae9 --- /dev/null +++ b/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml @@ -0,0 +1,207 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: managedclustersets.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ManagedClusterSet + listKind: ManagedClusterSetList + plural: managedclustersets + shortNames: + - mclset + - mclsets + singular: managedclusterset + preserveUnknownFields: false + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status + name: Empty + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: "ManagedClusterSet defines a group of ManagedClusters that user's + workload can run on. A workload can be defined to deployed on a ManagedClusterSet, + which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet + 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet + 3. 
The service exposed by the workload can be shared in any ManagedCluster + in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian + ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` + on the ManagedCluster to refers to the ManagedClusterSet. User is not allow + to add/remove this label on a ManagedCluster unless they have a RBAC rule + to CREATE on a virtual subresource of managedclustersets/join. In order + to update this label, user must have the permission on both the old and + new ManagedClusterSet." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + default: + clusterSelector: + selectorType: LegacyClusterSetLabel + description: Spec defines the attributes of the ManagedClusterSet + properties: + clusterSelector: + default: + selectorType: LegacyClusterSetLabel + description: ClusterSelector represents a selector of ManagedClusters + properties: + labelSelector: + description: LabelSelector define the general labelSelector which + clusterset will use to select target managedClusters + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + selectorType: + default: LegacyClusterSetLabel + description: SelectorType could only be "LegacyClusterSetLabel" + or "LabelSelector" "LegacyClusterSetLabel" means to use label + "cluster.open-cluster-management.io/clusterset:"" to select target clusters. 
"LabelSelector" means use + labelSelector to select target managedClusters + enum: + - LegacyClusterSetLabel + - LabelSelector + type: string + type: object + type: object + status: + description: Status represents the current status of the ManagedClusterSet + properties: + conditions: + description: Conditions contains the different condition statuses + for this ManagedClusterSet. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml new file mode 100644 index 000000000..359f13f26 --- /dev/null +++ b/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml @@ -0,0 +1,136 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: managedclustersetbindings.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ManagedClusterSetBinding + listKind: ManagedClusterSetBindingList + plural: managedclustersetbindings + shortNames: + - mclsetbinding + - mclsetbindings + singular: managedclustersetbinding + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ManagedClusterSetBinding projects a ManagedClusterSet into a + certain namespace. User is able to create a ManagedClusterSetBinding in + a namespace and bind it to a ManagedClusterSet if they have an RBAC rule + to CREATE on the virtual subresource of managedclustersets/bind. Workloads + created in the same namespace can only be distributed to ManagedClusters + in ManagedClusterSets bound in this namespace by higher level controllers. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of ManagedClusterSetBinding. + properties: + clusterSet: + description: ClusterSet is the name of the ManagedClusterSet to bind. + It must match the instance name of the ManagedClusterSetBinding + and cannot change once created. User is allowed to set this field + if they have an RBAC rule to CREATE on the virtual subresource of + managedclustersets/bind. + minLength: 1 + type: string + type: object + status: + description: Status represents the current status of the ManagedClusterSetBinding + properties: + conditions: + description: Conditions contains the different condition statuses + for this ManagedClusterSetBinding. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. 
For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/cluster/v1beta1/helpers.go b/cluster/v1beta1/helpers.go index 503dbc7b6..e5f996a5c 100644 --- a/cluster/v1beta1/helpers.go +++ b/cluster/v1beta1/helpers.go @@ -6,10 +6,113 @@ import ( "strconv" "sync" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" + v1 "open-cluster-management.io/api/cluster/v1" ) +type ManagedClustersGetter interface { + List(selector labels.Selector) (ret []*v1.ManagedCluster, err error) +} + +type ManagedClusterSetsGetter interface { + List(selector labels.Selector) (ret []*ManagedClusterSet, err error) +} + +type ManagedClusterSetBindingsGetter interface { + List(namespace string, selector labels.Selector) (ret []*ManagedClusterSetBinding, err error) +} + +// GetClustersFromClusterSet returns the ManagedClusterSet's managedClusters +func GetClustersFromClusterSet(clusterSet *ManagedClusterSet, + clustersGetter ManagedClustersGetter) ([]*v1.ManagedCluster, error) { + var clusters []*v1.ManagedCluster + + if clusterSet == nil { + return nil, nil + } + + clusterSelector, err := BuildClusterSelector(clusterSet) + if err != nil { + return nil, err + } + if clusterSelector == nil { + return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) + } + clusters, err = clustersGetter.List(clusterSelector) + if err != nil { + return nil, fmt.Errorf("failed to list ManagedClusters: %w", err) + } + return clusters, nil +} + +// GetClusterSetsOfCluster returns the managedClusterSets of a managedCluster +func GetClusterSetsOfCluster(cluster *v1.ManagedCluster, + clusterSetsGetter ManagedClusterSetsGetter) ([]*ManagedClusterSet, error) { + var returnClusterSets []*ManagedClusterSet + + if cluster == nil { + return nil, nil + } + + allClusterSets, err := clusterSetsGetter.List(labels.Everything()) + if err != nil { + return nil, err + } + for _, clusterSet := range allClusterSets { + clusterSelector, err := BuildClusterSelector(clusterSet) + if err != nil { + return nil, err + } + if clusterSelector == nil { + return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) + } + if clusterSelector.Matches(labels.Set(cluster.Labels)) { + returnClusterSets = append(returnClusterSets, clusterSet) + } + } + return returnClusterSets, nil +} + +func BuildClusterSelector(clusterSet *ManagedClusterSet) (labels.Selector, error) { + if clusterSet == nil { + return nil, nil + } + selectorType := clusterSet.Spec.ClusterSelector.SelectorType + + switch selectorType { + case "", LegacyClusterSetLabel: + return labels.SelectorFromSet(labels.Set{ + ClusterSetLabel: clusterSet.Name, + }), nil + case LabelSelector: + return metav1.LabelSelectorAsSelector(clusterSet.Spec.ClusterSelector.LabelSelector) + default: + return nil, fmt.Errorf("selectorType is not right: %s", clusterSet.Spec.ClusterSelector.SelectorType) + } +} + +// GetBoundManagedClusterSetBindings returns all bindings that are bound to clustersets in the given
namespace. +func GetBoundManagedClusterSetBindings(namespace string, + clusterSetBindingsGetter ManagedClusterSetBindingsGetter) ([]*ManagedClusterSetBinding, error) { + // get all clusterset bindings under the namespace + bindings, err := clusterSetBindingsGetter.List(namespace, labels.Everything()) + if err != nil { + return nil, err + } + + boundBindings := []*ManagedClusterSetBinding{} + for _, binding := range bindings { + if meta.IsStatusConditionTrue(binding.Status.Conditions, ClusterSetBindingBoundType) { + boundBindings = append(boundBindings, binding) + } + } + + return boundBindings, nil +} + type PlacementDecisionGetter interface { List(selector labels.Selector, namespace string) (ret []*PlacementDecision, err error) } diff --git a/cluster/v1beta1/helpers_test.go b/cluster/v1beta1/helpers_test.go index a41a2adb7..d54ea7c5d 100644 --- a/cluster/v1beta1/helpers_test.go +++ b/cluster/v1beta1/helpers_test.go @@ -1,15 +1,393 @@ package v1beta1 import ( + "context" + "os" "reflect" "strconv" "testing" + cliScheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + v1 "open-cluster-management.io/api/cluster/v1" +) + +var ( + scheme = runtime.NewScheme() ) +type clustersGetter struct { + client client.Client +} +type clusterSetsGetter struct { + client client.Client +} +type clusterSetBindingsGetter struct { + client client.Client +} + +var existingClusterSetBindings = []*ManagedClusterSetBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "dev", + Namespace: "default", + }, + Spec: ManagedClusterSetBindingSpec{ + ClusterSet: "dev", + }, + Status: ManagedClusterSetBindingStatus{ + Conditions: []metav1.Condition{ + { + Type: ClusterSetBindingBoundType, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "global", + Namespace: "default", + }, + Spec: ManagedClusterSetBindingSpec{ + ClusterSet: "global", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "no-such-cluster-set", + Namespace: "kube-system", + }, + Spec: ManagedClusterSetBindingSpec{ + ClusterSet: "no-such-cluster-set", + }, + }, +} +var existingClusterSets = []*ManagedClusterSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "dev", + }, + Spec: ManagedClusterSetSpec{}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "global", + }, + Spec: ManagedClusterSetSpec{ + ClusterSelector: ManagedClusterSelector{ + SelectorType: LabelSelector, + LabelSelector: &metav1.LabelSelector{}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "openshift", + }, + Spec: ManagedClusterSetSpec{ + ClusterSelector: ManagedClusterSelector{ + SelectorType: LabelSelector, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "vendor": "openshift", + }, + }, + }, + }, + }, +} +var existingClusters = []*v1.ManagedCluster{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "c1", + Labels: map[string]string{ + "vendor": "openshift", + ClusterSetLabel: "dev", + }, + }, + Spec: v1.ManagedClusterSpec{}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "c2", + Labels: map[string]string{ + "cloud": "aws", + "vendor": "openshift", + ClusterSetLabel: "dev", + }, + }, + Spec: v1.ManagedClusterSpec{}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "c3", + Labels: map[string]string{ + "cloud": "aws", + }, + }, + Spec: 
v1.ManagedClusterSpec{}, + }, +} + +func TestMain(m *testing.M) { + if err := v1.AddToScheme(cliScheme.Scheme); err != nil { + klog.Errorf("Failed adding cluster to scheme, %v", err) + os.Exit(1) + } + if err := AddToScheme(cliScheme.Scheme); err != nil { + klog.Errorf("Failed adding set to scheme, %v", err) + os.Exit(1) + } + + if err := v1.Install(scheme); err != nil { + klog.Errorf("Failed adding cluster to scheme, %v", err) + os.Exit(1) + } + if err := AddToScheme(scheme); err != nil { + klog.Errorf("Failed adding set to scheme, %v", err) + os.Exit(1) + } + + exitVal := m.Run() + os.Exit(exitVal) +} + +func (mcl clustersGetter) List(selector labels.Selector) ([]*v1.ManagedCluster, error) { + clusterList := v1.ManagedClusterList{} + err := mcl.client.List(context.Background(), &clusterList, &client.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, err + } + var retClusters []*v1.ManagedCluster + for i := range clusterList.Items { + retClusters = append(retClusters, &clusterList.Items[i]) + } + return retClusters, nil +} + +func (msl clusterSetsGetter) List(selector labels.Selector) ([]*ManagedClusterSet, error) { + clusterSetList := ManagedClusterSetList{} + err := msl.client.List(context.Background(), &clusterSetList, &client.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, err + } + var retClusterSets []*ManagedClusterSet + for i := range clusterSetList.Items { + retClusterSets = append(retClusterSets, &clusterSetList.Items[i]) + } + return retClusterSets, nil +} + +func (mbl clusterSetBindingsGetter) List(namespace string, + selector labels.Selector) ([]*ManagedClusterSetBinding, error) { + clusterSetBindingList := ManagedClusterSetBindingList{} + err := mbl.client.List(context.Background(), &clusterSetBindingList, + client.InNamespace(namespace), &client.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, err + } + var retClusterSetBindings []*ManagedClusterSetBinding + for i := range clusterSetBindingList.Items { + retClusterSetBindings = append(retClusterSetBindings, &clusterSetBindingList.Items[i]) + } + return retClusterSetBindings, nil +} + +func TestGetClustersFromClusterSet(t *testing.T) { + tests := []struct { + name string + clusterset *ManagedClusterSet + expectClustersName sets.Set[string] + expectError bool + }{ + { + name: "test legacy cluster set", + clusterset: &ManagedClusterSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dev", + }, + Spec: ManagedClusterSetSpec{}, + }, + expectClustersName: sets.New[string]("c1", "c2"), + }, + { + name: "test label selector(openshift) cluster set", + clusterset: &ManagedClusterSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "openshift", + }, + Spec: ManagedClusterSetSpec{ + ClusterSelector: ManagedClusterSelector{ + SelectorType: LabelSelector, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "vendor": "openshift", + }, + }, + }, + }, + }, + expectClustersName: sets.New[string]("c1", "c2"), + }, + { + name: "test global cluster set", + clusterset: &ManagedClusterSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "global", + }, + Spec: ManagedClusterSetSpec{ + ClusterSelector: ManagedClusterSelector{ + SelectorType: LabelSelector, + LabelSelector: &metav1.LabelSelector{}, + }, + }, + }, + expectClustersName: sets.New[string]("c1", "c2", "c3"), + }, + { + name: "test label selector cluster set", + clusterset: &ManagedClusterSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalidset", + }, + Spec: ManagedClusterSetSpec{ + ClusterSelector: 
ManagedClusterSelector{ + SelectorType: "invalidType", + }, + }, + }, + expectError: true, + }, + } + + var existingObjs []client.Object + for _, cluster := range existingClusters { + existingObjs = append(existingObjs, cluster) + } + mcl := clustersGetter{ + client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(existingObjs...).Build(), + } + + for _, test := range tests { + clusters, err := GetClustersFromClusterSet(test.clusterset, mcl) + if err != nil { + if test.expectError { + continue + } + t.Errorf("Case: %v, Failed to run GetClustersFromClusterSet with clusterset: %v", test.name, test.clusterset) + return + } + returnClusters := convertClusterToSet(clusters) + if !reflect.DeepEqual(returnClusters, test.expectClustersName) { + t.Errorf("Case: %v, Failed to run GetClustersFromClusterSet. Expect clusters: %v, return cluster: %v", test.name, test.expectClustersName, returnClusters) + return + } + } +} + +func TestGetClusterSetsOfCluster(t *testing.T) { + tests := []struct { + name string + cluster v1.ManagedCluster + expectClusterSetName sets.Set[string] + expectError bool + }{ + { + name: "test c1 cluster", + cluster: v1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "c1", + Labels: map[string]string{ + "vendor": "openshift", + ClusterSetLabel: "dev", + }, + }, + Spec: v1.ManagedClusterSpec{}, + }, + expectClusterSetName: sets.New[string]("dev", "openshift", "global"), + }, + { + name: "test c2 cluster", + cluster: v1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "c2", + Labels: map[string]string{ + "cloud": "aws", + "vendor": "openshift", + ClusterSetLabel: "dev", + }, + }, + Spec: v1.ManagedClusterSpec{}, + }, + expectClusterSetName: sets.New[string]("dev", "openshift", "global"), + }, + { + name: "test c3 cluster", + cluster: v1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "c2", + Labels: map[string]string{ + "cloud": "aws", + }, + }, + Spec: v1.ManagedClusterSpec{}, + }, + expectClusterSetName: sets.New[string]("global"), + }, + { + name: "test nonexist cluster in client", + cluster: v1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "doNotExistCluster", + Labels: map[string]string{ + "cloud": "aws", + "vendor": "openshift", + }, + }, + Spec: v1.ManagedClusterSpec{}, + }, + expectClusterSetName: sets.New[string]("openshift", "global"), + }, + } + + var existingObjs []client.Object + for _, cluster := range existingClusters { + existingObjs = append(existingObjs, cluster) + } + for _, clusterset := range existingClusterSets { + existingObjs = append(existingObjs, clusterset) + } + + msl := clusterSetsGetter{ + client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(existingObjs...).Build(), + } + + for _, test := range tests { + returnSets, err := GetClusterSetsOfCluster(&test.cluster, msl) + + if err != nil { + if test.expectError { + continue + } + t.Errorf("Case: %v, Failed to run GetClusterSetsOfCluster with cluster: %v", test.name, test.cluster) + return + } + returnClusters := convertClusterSetToSet(returnSets) + if !reflect.DeepEqual(returnClusters, test.expectClusterSetName) { + t.Errorf("Case: %v, Failed to run GetClusterSetsOfCluster. 
Expect clusters: %v, return cluster: %v", test.name, test.expectClusterSetName, returnClusters) + return + } + } +} + type FakePlacementDecisionGetter struct { FakeDecisions []*PlacementDecision } @@ -339,3 +717,89 @@ func TestPlacementDecisionClustersTracker_ExistingClusterGroups(t *testing.T) { } } } + +func convertClusterToSet(clusters []*v1.ManagedCluster) sets.Set[string] { + if len(clusters) == 0 { + return nil + } + retSet := sets.New[string]() + for _, cluster := range clusters { + retSet.Insert(cluster.Name) + } + return retSet +} + +func convertClusterSetToSet(clustersets []*ManagedClusterSet) sets.Set[string] { + if len(clustersets) == 0 { + return nil + } + retSet := sets.New[string]() + for _, clusterset := range clustersets { + retSet.Insert(clusterset.Name) + } + return retSet +} + +func convertClusterSetBindingsToSet(clusterSetBindings []*ManagedClusterSetBinding) sets.Set[string] { + if len(clusterSetBindings) == 0 { + return nil + } + retSet := sets.New[string]() + for _, clusterSetBinding := range clusterSetBindings { + retSet.Insert(clusterSetBinding.Name) + } + return retSet +} + +func TestGetValidManagedClusterSetBindings(t *testing.T) { + tests := []struct { + name string + namespace string + expectClusterSetBindingsNames sets.Set[string] + expectError bool + }{ + { + name: "test found valid cluster bindings only", + namespace: "default", + expectClusterSetBindingsNames: sets.New[string]("dev"), + }, + + { + name: "test no cluster binding found", + namespace: "kube-system", + expectClusterSetBindingsNames: nil, + }, + } + + var existingObjs []client.Object + for _, cluster := range existingClusters { + existingObjs = append(existingObjs, cluster) + } + for _, clusterSet := range existingClusterSets { + existingObjs = append(existingObjs, clusterSet) + } + for _, clusterSetBinding := range existingClusterSetBindings { + existingObjs = append(existingObjs, clusterSetBinding) + } + + mbl := clusterSetBindingsGetter{ + client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(existingObjs...).Build(), + } + + for _, test := range tests { + returnSets, err := GetBoundManagedClusterSetBindings(test.namespace, mbl) + + if err != nil { + if test.expectError { + continue + } + t.Errorf("Case: %v, Failed to run GetValidManagedClusterSetBindings with namespace: %v", test.name, test.namespace) + return + } + returnBindings := convertClusterSetBindingsToSet(returnSets) + if !reflect.DeepEqual(returnBindings, test.expectClusterSetBindingsNames) { + t.Errorf("Case: %v, Failed to run GetValidManagedClusterSetBindings. Expect bindings: %v, return bindings: %v", test.name, test.expectClusterSetBindingsNames, returnBindings) + return + } + } +} diff --git a/cluster/v1beta1/register.go b/cluster/v1beta1/register.go index 0f9156d26..16a33d15b 100644 --- a/cluster/v1beta1/register.go +++ b/cluster/v1beta1/register.go @@ -30,6 +30,10 @@ func Resource(resource string) schema.GroupResource { // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, + &ManagedClusterSet{}, + &ManagedClusterSetList{}, + &ManagedClusterSetBinding{}, + &ManagedClusterSetBindingList{}, &Placement{}, &PlacementList{}, &PlacementDecision{}, diff --git a/cluster/v1beta1/types_managedclusterset.go b/cluster/v1beta1/types_managedclusterset.go new file mode 100644 index 000000000..2bc43f9e0 --- /dev/null +++ b/cluster/v1beta1/types_managedclusterset.go @@ -0,0 +1,99 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterSetLabel is the label key set on a ManagedCluster to reference the ManagedClusterSet it belongs to +const ClusterSetLabel = "cluster.open-cluster-management.io/clusterset" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope="Cluster",shortName={"mclset","mclsets"} +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Empty",type="string",JSONPath=".status.conditions[?(@.type==\"ClusterSetEmpty\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. +// A workload can be defined to be deployed on a ManagedClusterSet, which means: +// 1. The workload can run on any ManagedCluster in the ManagedClusterSet +// 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet +// 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet +// +// In order to assign a ManagedCluster to a certain ManagedClusterSet, add a label with name +// `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refer to the ManagedClusterSet. +// A user is not allowed to add/remove this label on a ManagedCluster unless they have an RBAC rule to CREATE on +// a virtual subresource of managedclustersets/join. In order to update this label, the user must have permission +// on both the old and new ManagedClusterSet. +type ManagedClusterSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the attributes of the ManagedClusterSet + // +kubebuilder:default={clusterSelector: {selectorType: LegacyClusterSetLabel}} + Spec ManagedClusterSetSpec `json:"spec"` + + // Status represents the current status of the ManagedClusterSet + // +optional + Status ManagedClusterSetStatus `json:"status,omitempty"` + } + +// ManagedClusterSetSpec describes the attributes of the ManagedClusterSet +type ManagedClusterSetSpec struct { + // ClusterSelector represents a selector of ManagedClusters + // +optional + // +kubebuilder:default:={selectorType: LegacyClusterSetLabel} + ClusterSelector ManagedClusterSelector `json:"clusterSelector,omitempty"` +} + +// ManagedClusterSelector represents a selector of ManagedClusters +type ManagedClusterSelector struct { + // SelectorType could only be "LegacyClusterSetLabel" or "LabelSelector" + // "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. 
+ // "LabelSelector" means use labelSelector to select target managedClusters + // +kubebuilder:validation:Enum=LegacyClusterSetLabel;LabelSelector + // +kubebuilder:default:=LegacyClusterSetLabel + // +required + SelectorType SelectorType `json:"selectorType,omitempty"` + + // LabelSelector define the general labelSelector which clusterset will use to select target managedClusters + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` +} + +type SelectorType string + +const ( + // "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. + LegacyClusterSetLabel SelectorType = "LegacyClusterSetLabel" + // "LabelSelector" means use labelSelector to select target managedClusters + LabelSelector SelectorType = "LabelSelector" +) + +// ManagedClusterSetStatus represents the current status of the ManagedClusterSet. +type ManagedClusterSetStatus struct { + // Conditions contains the different condition statuses for this ManagedClusterSet. + Conditions []metav1.Condition `json:"conditions"` +} + +const ( + // ManagedClusterSetConditionEmpty means no ManagedCluster is included in the + // ManagedClusterSet. + ManagedClusterSetConditionEmpty string = "ClusterSetEmpty" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedClusterSetList is a collection of ManagedClusterSet. +type ManagedClusterSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of ManagedClusterSet. + Items []ManagedClusterSet `json:"items"` +} diff --git a/cluster/v1beta1/types_managedclustersetbinding.go b/cluster/v1beta1/types_managedclustersetbinding.go new file mode 100644 index 000000000..fed2c17d1 --- /dev/null +++ b/cluster/v1beta1/types_managedclustersetbinding.go @@ -0,0 +1,65 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope="Namespaced",shortName={"mclsetbinding","mclsetbindings"} +// +kubebuilder:storageversion + +// ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. +// User is able to create a ManagedClusterSetBinding in a namespace and bind it to a +// ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of +// managedclustersets/bind. Workloads created in the same namespace can only be +// distributed to ManagedClusters in ManagedClusterSets bound in this namespace by +// higher level controllers. +type ManagedClusterSetBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the attributes of ManagedClusterSetBinding. + Spec ManagedClusterSetBindingSpec `json:"spec"` + + // Status represents the current status of the ManagedClusterSetBinding + // +optional + Status ManagedClusterSetBindingStatus `json:"status,omitempty"` +} + +// ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding. +type ManagedClusterSetBindingSpec struct { + // ClusterSet is the name of the ManagedClusterSet to bind. It must match the + // instance name of the ManagedClusterSetBinding and cannot change once created. 
+ // User is allowed to set this field if they have an RBAC rule to CREATE on the + // virtual subresource of managedclustersets/bind. + // +kubebuilder:validation:MinLength=1 + ClusterSet string `json:"clusterSet"` +} + +const ( + // ClusterSetBindingBoundType is a condition type of clustersetbinding representing + // whether the ClusterSetBinding is bound to a clusterset. + ClusterSetBindingBoundType = "Bound" +) + +// ManagedClusterSetBindingStatus represents the current status of the ManagedClusterSetBinding. +type ManagedClusterSetBindingStatus struct { + // Conditions contains the different condition statuses for this ManagedClusterSetBinding. + Conditions []metav1.Condition `json:"conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding. +type ManagedClusterSetBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of ManagedClusterSetBinding. + Items []ManagedClusterSetBinding `json:"items"` +} diff --git a/cluster/v1beta1/zz_generated.deepcopy.go b/cluster/v1beta1/zz_generated.deepcopy.go index 347b89457..5d1166b2f 100644 --- a/cluster/v1beta1/zz_generated.deepcopy.go +++ b/cluster/v1beta1/zz_generated.deepcopy.go @@ -212,6 +212,228 @@ func (in *GroupStrategy) DeepCopy() *GroupStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSelector) DeepCopyInto(out *ManagedClusterSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSelector. +func (in *ManagedClusterSelector) DeepCopy() *ManagedClusterSelector { + if in == nil { + return nil + } + out := new(ManagedClusterSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSet) DeepCopyInto(out *ManagedClusterSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSet. +func (in *ManagedClusterSet) DeepCopy() *ManagedClusterSet { + if in == nil { + return nil + } + out := new(ManagedClusterSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterSetBinding) DeepCopyInto(out *ManagedClusterSetBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBinding. +func (in *ManagedClusterSetBinding) DeepCopy() *ManagedClusterSetBinding { + if in == nil { + return nil + } + out := new(ManagedClusterSetBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterSetBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetBindingList) DeepCopyInto(out *ManagedClusterSetBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedClusterSetBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingList. +func (in *ManagedClusterSetBindingList) DeepCopy() *ManagedClusterSetBindingList { + if in == nil { + return nil + } + out := new(ManagedClusterSetBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterSetBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetBindingSpec) DeepCopyInto(out *ManagedClusterSetBindingSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingSpec. +func (in *ManagedClusterSetBindingSpec) DeepCopy() *ManagedClusterSetBindingSpec { + if in == nil { + return nil + } + out := new(ManagedClusterSetBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetBindingStatus) DeepCopyInto(out *ManagedClusterSetBindingStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingStatus. +func (in *ManagedClusterSetBindingStatus) DeepCopy() *ManagedClusterSetBindingStatus { + if in == nil { + return nil + } + out := new(ManagedClusterSetBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterSetList) DeepCopyInto(out *ManagedClusterSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedClusterSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetList. +func (in *ManagedClusterSetList) DeepCopy() *ManagedClusterSetList { + if in == nil { + return nil + } + out := new(ManagedClusterSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetSpec) DeepCopyInto(out *ManagedClusterSetSpec) { + *out = *in + in.ClusterSelector.DeepCopyInto(&out.ClusterSelector) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetSpec. +func (in *ManagedClusterSetSpec) DeepCopy() *ManagedClusterSetSpec { + if in == nil { + return nil + } + out := new(ManagedClusterSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSetStatus) DeepCopyInto(out *ManagedClusterSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetStatus. +func (in *ManagedClusterSetStatus) DeepCopy() *ManagedClusterSetStatus { + if in == nil { + return nil + } + out := new(ManagedClusterSetStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Placement) DeepCopyInto(out *Placement) { *out = *in diff --git a/cluster/v1beta1/zz_generated.swagger_doc_generated.go b/cluster/v1beta1/zz_generated.swagger_doc_generated.go index bc134648b..691aa1659 100644 --- a/cluster/v1beta1/zz_generated.swagger_doc_generated.go +++ b/cluster/v1beta1/zz_generated.swagger_doc_generated.go @@ -11,6 +11,92 @@ package v1beta1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_ManagedClusterSelector = map[string]string{ + "": "ManagedClusterSelector represents a selector of ManagedClusters", + "selectorType": "SelectorType could only be \"LegacyClusterSetLabel\" or \"LabelSelector\" \"LegacyClusterSetLabel\" means to use label \"cluster.open-cluster-management.io/clusterset:\"\" to select target clusters. 
\"LabelSelector\" means use labelSelector to select target managedClusters", + "labelSelector": "LabelSelector define the general labelSelector which clusterset will use to select target managedClusters", +} + +func (ManagedClusterSelector) SwaggerDoc() map[string]string { + return map_ManagedClusterSelector +} + +var map_ManagedClusterSet = map[string]string{ + "": "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean:\n 1. The workload can run on any ManagedCluster in the ManagedClusterSet\n 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet\n 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet\n\nIn order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission on both the old and new ManagedClusterSet.", + "spec": "Spec defines the attributes of the ManagedClusterSet", + "status": "Status represents the current status of the ManagedClusterSet", +} + +func (ManagedClusterSet) SwaggerDoc() map[string]string { + return map_ManagedClusterSet +} + +var map_ManagedClusterSetList = map[string]string{ + "": "ManagedClusterSetList is a collection of ManagedClusterSet.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ManagedClusterSet.", +} + +func (ManagedClusterSetList) SwaggerDoc() map[string]string { + return map_ManagedClusterSetList +} + +var map_ManagedClusterSetSpec = map[string]string{ + "": "ManagedClusterSetSpec describes the attributes of the ManagedClusterSet", + "clusterSelector": "ClusterSelector represents a selector of ManagedClusters", +} + +func (ManagedClusterSetSpec) SwaggerDoc() map[string]string { + return map_ManagedClusterSetSpec +} + +var map_ManagedClusterSetStatus = map[string]string{ + "": "ManagedClusterSetStatus represents the current status of the ManagedClusterSet.", + "conditions": "Conditions contains the different condition statuses for this ManagedClusterSet.", +} + +func (ManagedClusterSetStatus) SwaggerDoc() map[string]string { + return map_ManagedClusterSetStatus +} + +var map_ManagedClusterSetBinding = map[string]string{ + "": "ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers.", + "spec": "Spec defines the attributes of ManagedClusterSetBinding.", + "status": "Status represents the current status of the ManagedClusterSetBinding", +} + +func (ManagedClusterSetBinding) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBinding +} + +var map_ManagedClusterSetBindingList = map[string]string{ + "": "ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ManagedClusterSetBinding.", +} + +func (ManagedClusterSetBindingList) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBindingList +} + +var map_ManagedClusterSetBindingSpec = map[string]string{ + "": "ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding.", + "clusterSet": "ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind.", +} + +func (ManagedClusterSetBindingSpec) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBindingSpec +} + +var map_ManagedClusterSetBindingStatus = map[string]string{ + "": "ManagedClusterSetBindingStatus represents the current status of the ManagedClusterSetBinding.", + "conditions": "Conditions contains the different condition statuses for this ManagedClusterSetBinding.", +} + +func (ManagedClusterSetBindingStatus) SwaggerDoc() map[string]string { + return map_ManagedClusterSetBindingStatus +} + var map_AddOnScore = map[string]string{ "": "AddOnScore represents the configuration of the addon score source.", "resourceName": "ResourceName defines the resource name of the AddOnPlacementScore. The placement prioritizer selects AddOnPlacementScore CR by this name.", diff --git a/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml index 35109c2e5..c0d11bb93 100644 --- a/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml +++ b/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml @@ -22,7 +22,10 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1beta2 + deprecated: true + deprecationWarning: cluster.open-cluster-management.io/v1beta1 ManagedClusterSet + is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSet + name: v1beta1 schema: openAPIV3Schema: description: "ManagedClusterSet defines a group of ManagedClusters that user's @@ -53,12 +56,12 @@ spec: spec: default: clusterSelector: - selectorType: ExclusiveClusterSetLabel + selectorType: LegacyClusterSetLabel description: Spec defines the attributes of the ManagedClusterSet properties: clusterSelector: default: - selectorType: ExclusiveClusterSetLabel + selectorType: LegacyClusterSetLabel description: ClusterSelector represents a selector of ManagedClusters properties: labelSelector: @@ -108,14 +111,14 @@ spec: type: object x-kubernetes-map-type: atomic selectorType: - default: ExclusiveClusterSetLabel - description: SelectorType could only be "ExclusiveClusterSetLabel" - or "LabelSelector" "ExclusiveClusterSetLabel" means to use label + default: LegacyClusterSetLabel + description: SelectorType could only be "LegacyClusterSetLabel" + or "LabelSelector" "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. 
"LabelSelector" means use labelSelector to select target managedClusters enum: - - ExclusiveClusterSetLabel + - LegacyClusterSetLabel - LabelSelector type: string type: object @@ -196,6 +199,178 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status + name: Empty + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: "ManagedClusterSet defines a group of ManagedClusters that user's + workload can run on. A workload can be defined to deployed on a ManagedClusterSet, + which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet + 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet + 3. The service exposed by the workload can be shared in any ManagedCluster + in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian + ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` + on the ManagedCluster to refers to the ManagedClusterSet. User is not allow + to add/remove this label on a ManagedCluster unless they have a RBAC rule + to CREATE on a virtual subresource of managedclustersets/join. In order + to update this label, user must have the permission on both the old and + new ManagedClusterSet." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + default: + clusterSelector: + selectorType: ExclusiveClusterSetLabel + description: Spec defines the attributes of the ManagedClusterSet + properties: + clusterSelector: + default: + selectorType: ExclusiveClusterSetLabel + description: ClusterSelector represents a selector of ManagedClusters + properties: + labelSelector: + description: LabelSelector define the general labelSelector which + clusterset will use to select target managedClusters + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + selectorType: + default: ExclusiveClusterSetLabel + description: SelectorType could only be "ExclusiveClusterSetLabel" + or "LabelSelector" "ExclusiveClusterSetLabel" means to use label + "cluster.open-cluster-management.io/clusterset:"" to select target clusters. "LabelSelector" means use + labelSelector to select target managedClusters + enum: + - ExclusiveClusterSetLabel + - LabelSelector + type: string + type: object + type: object + status: + description: Status represents the current status of the ManagedClusterSet + properties: + conditions: + description: Conditions contains the different condition statuses + for this ManagedClusterSet. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml index cfba3ffa1..e07e082e2 100644 --- a/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml +++ b/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml @@ -15,7 +15,10 @@ spec: preserveUnknownFields: false scope: Namespaced versions: - - name: v1beta2 + - deprecated: true + deprecationWarning: cluster.open-cluster-management.io/v1beta1 ManagedClusterSetBinding + is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSetBinding + name: v1beta1 schema: openAPIV3Schema: description: ManagedClusterSetBinding projects a ManagedClusterSet into a @@ -125,6 +128,108 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - name: v1beta2 + schema: + openAPIV3Schema: + description: ManagedClusterSetBinding projects a ManagedClusterSet into a + certain namespace. User is able to create a ManagedClusterSetBinding in + a namespace and bind it to a ManagedClusterSet if they have an RBAC rule + to CREATE on the virtual subresource of managedclustersets/bind. Workloads + created in the same namespace can only be distributed to ManagedClusters + in ManagedClusterSets bound in this namespace by higher level controllers. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of ManagedClusterSetBinding. + properties: + clusterSet: + description: ClusterSet is the name of the ManagedClusterSet to bind. + It must match the instance name of the ManagedClusterSetBinding + and cannot change once created. User is allowed to set this field + if they have an RBAC rule to CREATE on the virtual subresource of + managedclustersets/bind. + minLength: 1 + type: string + type: object + status: + description: Status represents the current status of the ManagedClusterSetBinding + properties: + conditions: + description: Conditions contains the different condition statuses + for this ManagedClusterSetBinding. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/hack/update-deepcopy.sh b/hack/update-deepcopy.sh index 0d7d06e62..a91d7cf25 100755 --- a/hack/update-deepcopy.sh +++ b/hack/update-deepcopy.sh @@ -7,9 +7,12 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge verify="${VERIFY:-}" +# cluster:v1alpha1 is generated in the Makefile target 'update-scripts' using controller-gen +# because gengo isn't respecting deepcopy-gen:false nor does it support generics +# Issue: https://github.com/kubernetes/gengo/issues/225 GOFLAGS="" bash ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ open-cluster-management.io/api/generated \ open-cluster-management.io/api \ - "cluster:v1 cluster:v1alpha1 cluster:v1beta1 cluster:v1beta2 work:v1alpha1 work:v1 operator:v1 addon:v1alpha1" \ + "cluster:v1 cluster:v1beta1 cluster:v1beta2 work:v1alpha1 work:v1 operator:v1 addon:v1alpha1" \ --go-header-file ${SCRIPT_ROOT}/hack/empty.txt \ ${verify}
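For illustration, the new v1beta2 ManagedClusterSet schema introduced above supports two selector types. A minimal manifest using the LabelSelector mode could look like the sketch below; the set name and the region label are hypothetical and not part of this patch:

apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSet
metadata:
  name: region-east                 # hypothetical name
spec:
  clusterSelector:
    # Select member clusters by arbitrary labels instead of the exclusive
    # cluster.open-cluster-management.io/clusterset label.
    selectorType: LabelSelector
    labelSelector:
      matchLabels:
        region: east                # hypothetical label

If spec.clusterSelector is omitted, the schema defaults selectorType to ExclusiveClusterSetLabel, so existing label-based cluster sets keep their current behavior.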
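In the default ExclusiveClusterSetLabel mode, membership is declared on the ManagedCluster itself through the cluster.open-cluster-management.io/clusterset label described in the schema above, and adding or removing that label requires CREATE on the managedclustersets/join virtual subresource. A sketch with a hypothetical cluster name, assuming the existing cluster.open-cluster-management.io/v1 ManagedCluster API:

apiVersion: cluster.open-cluster-management.io/v1
kind: ManagedCluster
metadata:
  name: cluster1                                          # hypothetical
  labels:
    # Assigns this cluster to the "region-east" ManagedClusterSet.
    cluster.open-cluster-management.io/clusterset: region-east
spec:
  hubAcceptsClient: true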
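The v1beta2 ManagedClusterSetBinding added above projects a set into a namespace; per its schema, metadata.name must equal spec.clusterSet, and the binding lives in the namespace whose workloads should be able to target the set. An illustrative manifest (names are hypothetical):

apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
  name: region-east            # must equal spec.clusterSet
  namespace: app-team          # hypothetical workload namespace
spec:
  clusterSet: region-east

Note that this patch keeps the v1beta1 binding served for compatibility (storage: false) and adds a deprecationWarning steering clients to v1beta2, which becomes the storage version.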