From 080da9bdea4660297a1ead1eee472dd0589477a4 Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Thu, 18 Jan 2024 13:15:08 +0200 Subject: [PATCH 1/9] [api/v1alpha1] Make MultiKueueCluster an API object. --- apis/kueue/v1alpha1/multikueueconfig_types.go | 65 +++-- apis/kueue/v1alpha1/zz_generated.deepcopy.go | 86 ++++++- .../kueue.x-k8s.io_multikueueclusters.yaml | 146 +++++++++++ .../crd/kueue.x-k8s.io_multikueueconfigs.yaml | 38 +-- .../kueue/v1alpha1/multikueuecluster.go | 188 +++++++++++++- .../kueue/v1alpha1/multikueueclusterspec.go | 38 +++ .../kueue/v1alpha1/multikueueclusterstatus.go | 44 ++++ .../kueue/v1alpha1/multikueueconfigspec.go | 9 +- client-go/applyconfiguration/utils.go | 4 + .../kueue/v1alpha1/fake/fake_kueue_client.go | 4 + .../v1alpha1/fake/fake_multikueuecluster.go | 177 +++++++++++++ .../kueue/v1alpha1/generated_expansion.go | 2 + .../typed/kueue/v1alpha1/kueue_client.go | 5 + .../typed/kueue/v1alpha1/multikueuecluster.go | 242 ++++++++++++++++++ .../informers/externalversions/generic.go | 2 + .../kueue/v1alpha1/interface.go | 7 + .../kueue/v1alpha1/multikueuecluster.go | 88 +++++++ .../kueue/v1alpha1/expansion_generated.go | 4 + .../kueue/v1alpha1/multikueuecluster.go | 67 +++++ .../kueue.x-k8s.io_multikueueclusters.yaml | 133 ++++++++++ .../kueue.x-k8s.io_multikueueconfigs.yaml | 38 +-- config/components/crd/kustomization.yaml | 1 + 22 files changed, 1288 insertions(+), 100 deletions(-) create mode 100644 charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml create mode 100644 client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go create mode 100644 client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterstatus.go create mode 100644 client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_multikueuecluster.go create mode 100644 client-go/clientset/versioned/typed/kueue/v1alpha1/multikueuecluster.go create mode 100644 client-go/informers/externalversions/kueue/v1alpha1/multikueuecluster.go create mode 100644 client-go/listers/kueue/v1alpha1/multikueuecluster.go create mode 100644 config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml diff --git a/apis/kueue/v1alpha1/multikueueconfig_types.go b/apis/kueue/v1alpha1/multikueueconfig_types.go index 3be3db2f29..1fa40fb5d5 100644 --- a/apis/kueue/v1alpha1/multikueueconfig_types.go +++ b/apis/kueue/v1alpha1/multikueueconfig_types.go @@ -24,21 +24,9 @@ const ( MultiKueueConfigSecretKey = "kubeconfig" ) -// MultiKueueConfigSpec defines the desired state of MultiKueueConfig -type MultiKueueConfigSpec struct { - // clusters contains the list of configurations for using worker clusters. 
- // - // +listType=map - // +listMapKey=name - // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=100 - Clusters []MultiKueueCluster `json:"clusters"` -} - -type MultiKueueCluster struct { - Name string `json:"name"` - KubeconfigRef KubeconfigRef `json:"kubeconfigRef"` -} +const ( + MultiKueueClusterActive = "Active" +) type LocationType string @@ -62,13 +50,56 @@ type KubeconfigRef struct { LocationType LocationType `json:"locationType"` } +type MultiKueueClusterSpec struct { + KubeconfigRef KubeconfigRef `json:"kubeconfigRef"` +} + +type MultiKueueClusterStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +//+genclient +//+genclient:nonNamespaced +//+kubebuilder:object:root=true +//+kubebuilder:storageversion +//+kubebuilder:subresource:status +//+kubebuilder:resource:scope=Cluster + +// MultiKueueCluster is the Schema for the multikueue API +type MultiKueueCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MultiKueueClusterSpec `json:"spec,omitempty"` + Status MultiKueueClusterStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// MultiKueueClusterList contains a list of MultiKueueCluster +type MultiKueueClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MultiKueueCluster `json:"items"` +} + +// MultiKueueConfigSpec defines the desired state of MultiKueueConfig +type MultiKueueConfigSpec struct { + // List of MultiKueueClusters names where the workloads from the ClusterQueue should be distributed. + // + // +listType=set + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + Clusters []string `json:"clusters"` +} + //+genclient //+genclient:nonNamespaced //+kubebuilder:object:root=true //+kubebuilder:storageversion //+kubebuilder:resource:scope=Cluster -// MultiKueueConfig is the Schema for the provisioningrequestconfig API +// MultiKueueConfig is the Schema for the multikueue API type MultiKueueConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -86,5 +117,5 @@ type MultiKueueConfigList struct { } func init() { - SchemeBuilder.Register(&MultiKueueConfig{}, &MultiKueueConfigList{}) + SchemeBuilder.Register(&MultiKueueConfig{}, &MultiKueueConfigList{}, &MultiKueueCluster{}, &MultiKueueClusterList{}) } diff --git a/apis/kueue/v1alpha1/zz_generated.deepcopy.go b/apis/kueue/v1alpha1/zz_generated.deepcopy.go index 8c0ca58d4f..cfd70d1936 100644 --- a/apis/kueue/v1alpha1/zz_generated.deepcopy.go +++ b/apis/kueue/v1alpha1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1alpha1 import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -43,7 +44,10 @@ func (in *KubeconfigRef) DeepCopy() *KubeconfigRef { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MultiKueueCluster) DeepCopyInto(out *MultiKueueCluster) { *out = *in - out.KubeconfigRef = in.KubeconfigRef + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiKueueCluster. 
@@ -56,6 +60,84 @@ func (in *MultiKueueCluster) DeepCopy() *MultiKueueCluster { return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiKueueCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiKueueClusterList) DeepCopyInto(out *MultiKueueClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MultiKueueCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiKueueClusterList. +func (in *MultiKueueClusterList) DeepCopy() *MultiKueueClusterList { + if in == nil { + return nil + } + out := new(MultiKueueClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiKueueClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiKueueClusterSpec) DeepCopyInto(out *MultiKueueClusterSpec) { + *out = *in + out.KubeconfigRef = in.KubeconfigRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiKueueClusterSpec. +func (in *MultiKueueClusterSpec) DeepCopy() *MultiKueueClusterSpec { + if in == nil { + return nil + } + out := new(MultiKueueClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiKueueClusterStatus) DeepCopyInto(out *MultiKueueClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiKueueClusterStatus. +func (in *MultiKueueClusterStatus) DeepCopy() *MultiKueueClusterStatus { + if in == nil { + return nil + } + out := new(MultiKueueClusterStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MultiKueueConfig) DeepCopyInto(out *MultiKueueConfig) { *out = *in @@ -119,7 +201,7 @@ func (in *MultiKueueConfigSpec) DeepCopyInto(out *MultiKueueConfigSpec) { *out = *in if in.Clusters != nil { in, out := &in.Clusters, &out.Clusters - *out = make([]MultiKueueCluster, len(*in)) + *out = make([]string, len(*in)) copy(*out, *in) } } diff --git a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml new file mode 100644 index 0000000000..46af7d997c --- /dev/null +++ b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml @@ -0,0 +1,146 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.enableCertManager }} + cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "kueue.fullname" . }}-serving-cert + {{- end }} + controller-gen.kubebuilder.io/version: v0.12.0 + name: multikueueclusters.kueue.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: {{ include "kueue.fullname" . }}-webhook-service + namespace: '{{ .Release.Namespace }}' + path: /convert + conversionReviewVersions: + - v1 + group: kueue.x-k8s.io + names: + kind: MultiKueueCluster + listKind: MultiKueueClusterList + plural: multikueueclusters + singular: multikueuecluster + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MultiKueueCluster is the Schema for the multikueue API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + kubeconfigRef: + properties: + location: + description: Location of the KubeConfig. + type: string + locationType: + default: Secret + description: Type of the KubeConfig location. + enum: + - Secret + type: string + name: + description: Name of the cluster inside the given KubeConfig. + type: string + required: + - location + - locationType + - name + type: object + required: + - kubeconfigRef + type: object + status: + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. 
This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml index a38b5ca122..d457faec3a 100644 --- a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml +++ b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml @@ -30,8 +30,7 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: MultiKueueConfig is the Schema for the provisioningrequestconfig - API + description: MultiKueueConfig is the Schema for the multikueue API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -49,41 +48,14 @@ spec: description: MultiKueueConfigSpec defines the desired state of MultiKueueConfig properties: clusters: - description: clusters contains the list of configurations for using - worker clusters. + description: List of MultiKueueClusters names where the workloads + from the ClusterQueue should be distributed. items: - properties: - kubeconfigRef: - properties: - location: - description: Location of the KubeConfig. - type: string - locationType: - default: Secret - description: Type of the KubeConfig location. - enum: - - Secret - type: string - name: - description: Name of the cluster inside the given KubeConfig. 
- type: string - required: - - location - - locationType - - name - type: object - name: - type: string - required: - - kubeconfigRef - - name - type: object + type: string maxItems: 100 minItems: 1 type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map + x-kubernetes-list-type: set required: - clusters type: object diff --git a/client-go/applyconfiguration/kueue/v1alpha1/multikueuecluster.go b/client-go/applyconfiguration/kueue/v1alpha1/multikueuecluster.go index 28a3a76865..cd1300384a 100644 --- a/client-go/applyconfiguration/kueue/v1alpha1/multikueuecluster.go +++ b/client-go/applyconfiguration/kueue/v1alpha1/multikueuecluster.go @@ -17,31 +17,201 @@ limitations under the License. package v1alpha1 +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + // MultiKueueClusterApplyConfiguration represents an declarative configuration of the MultiKueueCluster type for use // with apply. type MultiKueueClusterApplyConfiguration struct { - Name *string `json:"name,omitempty"` - KubeconfigRef *KubeconfigRefApplyConfiguration `json:"kubeconfigRef,omitempty"` + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MultiKueueClusterSpecApplyConfiguration `json:"spec,omitempty"` + Status *MultiKueueClusterStatusApplyConfiguration `json:"status,omitempty"` } -// MultiKueueClusterApplyConfiguration constructs an declarative configuration of the MultiKueueCluster type for use with +// MultiKueueCluster constructs an declarative configuration of the MultiKueueCluster type for use with // apply. -func MultiKueueCluster() *MultiKueueClusterApplyConfiguration { - return &MultiKueueClusterApplyConfiguration{} +func MultiKueueCluster(name string) *MultiKueueClusterApplyConfiguration { + b := &MultiKueueClusterApplyConfiguration{} + b.WithName(name) + b.WithKind("MultiKueueCluster") + b.WithAPIVersion("kueue.x-k8s.io/v1alpha1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithKind(value string) *MultiKueueClusterApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithAPIVersion(value string) *MultiKueueClusterApplyConfiguration { + b.APIVersion = &value + return b } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
func (b *MultiKueueClusterApplyConfiguration) WithName(value string) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b } -// WithKubeconfigRef sets the KubeconfigRef field in the declarative configuration to the given value +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithGenerateName(value string) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithNamespace(value string) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithUID(value types.UID) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithResourceVersion(value string) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithGeneration(value int64) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *MultiKueueClusterApplyConfiguration) WithLabels(entries map[string]string) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MultiKueueClusterApplyConfiguration) WithAnnotations(entries map[string]string) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MultiKueueClusterApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *MultiKueueClusterApplyConfiguration) WithFinalizers(values ...string) *MultiKueueClusterApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *MultiKueueClusterApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithSpec(value *MultiKueueClusterSpecApplyConfiguration) *MultiKueueClusterApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeconfigRef field is set to the value of the last call. -func (b *MultiKueueClusterApplyConfiguration) WithKubeconfigRef(value *KubeconfigRefApplyConfiguration) *MultiKueueClusterApplyConfiguration { - b.KubeconfigRef = value +// If called multiple times, the Status field is set to the value of the last call. +func (b *MultiKueueClusterApplyConfiguration) WithStatus(value *MultiKueueClusterStatusApplyConfiguration) *MultiKueueClusterApplyConfiguration { + b.Status = value return b } diff --git a/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go new file mode 100644 index 0000000000..eb64f01800 --- /dev/null +++ b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go @@ -0,0 +1,38 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MultiKueueClusterSpecApplyConfiguration represents an declarative configuration of the MultiKueueClusterSpec type for use +// with apply. +type MultiKueueClusterSpecApplyConfiguration struct { + KubeconfigRef *KubeconfigRefApplyConfiguration `json:"kubeconfigRef,omitempty"` +} + +// MultiKueueClusterSpecApplyConfiguration constructs an declarative configuration of the MultiKueueClusterSpec type for use with +// apply. +func MultiKueueClusterSpec() *MultiKueueClusterSpecApplyConfiguration { + return &MultiKueueClusterSpecApplyConfiguration{} +} + +// WithKubeconfigRef sets the KubeconfigRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KubeconfigRef field is set to the value of the last call. 
+func (b *MultiKueueClusterSpecApplyConfiguration) WithKubeconfigRef(value *KubeconfigRefApplyConfiguration) *MultiKueueClusterSpecApplyConfiguration { + b.KubeconfigRef = value + return b +} diff --git a/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterstatus.go b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterstatus.go new file mode 100644 index 0000000000..c01eaf7d06 --- /dev/null +++ b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MultiKueueClusterStatusApplyConfiguration represents an declarative configuration of the MultiKueueClusterStatus type for use +// with apply. +type MultiKueueClusterStatusApplyConfiguration struct { + Conditions []v1.Condition `json:"conditions,omitempty"` +} + +// MultiKueueClusterStatusApplyConfiguration constructs an declarative configuration of the MultiKueueClusterStatus type for use with +// apply. +func MultiKueueClusterStatus() *MultiKueueClusterStatusApplyConfiguration { + return &MultiKueueClusterStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *MultiKueueClusterStatusApplyConfiguration) WithConditions(values ...v1.Condition) *MultiKueueClusterStatusApplyConfiguration { + for i := range values { + b.Conditions = append(b.Conditions, values[i]) + } + return b +} diff --git a/client-go/applyconfiguration/kueue/v1alpha1/multikueueconfigspec.go b/client-go/applyconfiguration/kueue/v1alpha1/multikueueconfigspec.go index 35690245e2..143954a1c0 100644 --- a/client-go/applyconfiguration/kueue/v1alpha1/multikueueconfigspec.go +++ b/client-go/applyconfiguration/kueue/v1alpha1/multikueueconfigspec.go @@ -20,7 +20,7 @@ package v1alpha1 // MultiKueueConfigSpecApplyConfiguration represents an declarative configuration of the MultiKueueConfigSpec type for use // with apply. type MultiKueueConfigSpecApplyConfiguration struct { - Clusters []MultiKueueClusterApplyConfiguration `json:"clusters,omitempty"` + Clusters []string `json:"clusters,omitempty"` } // MultiKueueConfigSpecApplyConfiguration constructs an declarative configuration of the MultiKueueConfigSpec type for use with @@ -32,12 +32,9 @@ func MultiKueueConfigSpec() *MultiKueueConfigSpecApplyConfiguration { // WithClusters adds the given value to the Clusters field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Clusters field. 
-func (b *MultiKueueConfigSpecApplyConfiguration) WithClusters(values ...*MultiKueueClusterApplyConfiguration) *MultiKueueConfigSpecApplyConfiguration { +func (b *MultiKueueConfigSpecApplyConfiguration) WithClusters(values ...string) *MultiKueueConfigSpecApplyConfiguration { for i := range values { - if values[i] == nil { - panic("nil value passed to WithClusters") - } - b.Clusters = append(b.Clusters, *values[i]) + b.Clusters = append(b.Clusters, values[i]) } return b } diff --git a/client-go/applyconfiguration/utils.go b/client-go/applyconfiguration/utils.go index 391fb1ca51..690ae44cdc 100644 --- a/client-go/applyconfiguration/utils.go +++ b/client-go/applyconfiguration/utils.go @@ -36,6 +36,10 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &kueuev1alpha1.KubeconfigRefApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MultiKueueCluster"): return &kueuev1alpha1.MultiKueueClusterApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MultiKueueClusterSpec"): + return &kueuev1alpha1.MultiKueueClusterSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MultiKueueClusterStatus"): + return &kueuev1alpha1.MultiKueueClusterStatusApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MultiKueueConfig"): return &kueuev1alpha1.MultiKueueConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MultiKueueConfigSpec"): diff --git a/client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_kueue_client.go b/client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_kueue_client.go index e79d88c0ed..4fd5360c56 100644 --- a/client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_kueue_client.go +++ b/client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_kueue_client.go @@ -27,6 +27,10 @@ type FakeKueueV1alpha1 struct { *testing.Fake } +func (c *FakeKueueV1alpha1) MultiKueueClusters() v1alpha1.MultiKueueClusterInterface { + return &FakeMultiKueueClusters{c} +} + func (c *FakeKueueV1alpha1) MultiKueueConfigs() v1alpha1.MultiKueueConfigInterface { return &FakeMultiKueueConfigs{c} } diff --git a/client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_multikueuecluster.go b/client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_multikueuecluster.go new file mode 100644 index 0000000000..70509a5bf9 --- /dev/null +++ b/client-go/clientset/versioned/typed/kueue/v1alpha1/fake/fake_multikueuecluster.go @@ -0,0 +1,177 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "sigs.k8s.io/kueue/apis/kueue/v1alpha1" + kueuev1alpha1 "sigs.k8s.io/kueue/client-go/applyconfiguration/kueue/v1alpha1" +) + +// FakeMultiKueueClusters implements MultiKueueClusterInterface +type FakeMultiKueueClusters struct { + Fake *FakeKueueV1alpha1 +} + +var multikueueclustersResource = v1alpha1.SchemeGroupVersion.WithResource("multikueueclusters") + +var multikueueclustersKind = v1alpha1.SchemeGroupVersion.WithKind("MultiKueueCluster") + +// Get takes name of the multiKueueCluster, and returns the corresponding multiKueueCluster object, and an error if there is any. +func (c *FakeMultiKueueClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MultiKueueCluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(multikueueclustersResource, name), &v1alpha1.MultiKueueCluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MultiKueueCluster), err +} + +// List takes label and field selectors, and returns the list of MultiKueueClusters that match those selectors. +func (c *FakeMultiKueueClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MultiKueueClusterList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(multikueueclustersResource, multikueueclustersKind, opts), &v1alpha1.MultiKueueClusterList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.MultiKueueClusterList{ListMeta: obj.(*v1alpha1.MultiKueueClusterList).ListMeta} + for _, item := range obj.(*v1alpha1.MultiKueueClusterList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested multiKueueClusters. +func (c *FakeMultiKueueClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(multikueueclustersResource, opts)) +} + +// Create takes the representation of a multiKueueCluster and creates it. Returns the server's representation of the multiKueueCluster, and an error, if there is any. +func (c *FakeMultiKueueClusters) Create(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.CreateOptions) (result *v1alpha1.MultiKueueCluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(multikueueclustersResource, multiKueueCluster), &v1alpha1.MultiKueueCluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MultiKueueCluster), err +} + +// Update takes the representation of a multiKueueCluster and updates it. Returns the server's representation of the multiKueueCluster, and an error, if there is any. +func (c *FakeMultiKueueClusters) Update(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.UpdateOptions) (result *v1alpha1.MultiKueueCluster, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(multikueueclustersResource, multiKueueCluster), &v1alpha1.MultiKueueCluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MultiKueueCluster), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeMultiKueueClusters) UpdateStatus(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.UpdateOptions) (*v1alpha1.MultiKueueCluster, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(multikueueclustersResource, "status", multiKueueCluster), &v1alpha1.MultiKueueCluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MultiKueueCluster), err +} + +// Delete takes name of the multiKueueCluster and deletes it. Returns an error if one occurs. +func (c *FakeMultiKueueClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(multikueueclustersResource, name, opts), &v1alpha1.MultiKueueCluster{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMultiKueueClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(multikueueclustersResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.MultiKueueClusterList{}) + return err +} + +// Patch applies the patch and returns the patched multiKueueCluster. +func (c *FakeMultiKueueClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MultiKueueCluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(multikueueclustersResource, name, pt, data, subresources...), &v1alpha1.MultiKueueCluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MultiKueueCluster), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied multiKueueCluster. +func (c *FakeMultiKueueClusters) Apply(ctx context.Context, multiKueueCluster *kueuev1alpha1.MultiKueueClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MultiKueueCluster, err error) { + if multiKueueCluster == nil { + return nil, fmt.Errorf("multiKueueCluster provided to Apply must not be nil") + } + data, err := json.Marshal(multiKueueCluster) + if err != nil { + return nil, err + } + name := multiKueueCluster.Name + if name == nil { + return nil, fmt.Errorf("multiKueueCluster.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(multikueueclustersResource, *name, types.ApplyPatchType, data), &v1alpha1.MultiKueueCluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MultiKueueCluster), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeMultiKueueClusters) ApplyStatus(ctx context.Context, multiKueueCluster *kueuev1alpha1.MultiKueueClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MultiKueueCluster, err error) { + if multiKueueCluster == nil { + return nil, fmt.Errorf("multiKueueCluster provided to Apply must not be nil") + } + data, err := json.Marshal(multiKueueCluster) + if err != nil { + return nil, err + } + name := multiKueueCluster.Name + if name == nil { + return nil, fmt.Errorf("multiKueueCluster.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(multikueueclustersResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.MultiKueueCluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.MultiKueueCluster), err +} diff --git a/client-go/clientset/versioned/typed/kueue/v1alpha1/generated_expansion.go b/client-go/clientset/versioned/typed/kueue/v1alpha1/generated_expansion.go index c7f54bb59f..68d6592938 100644 --- a/client-go/clientset/versioned/typed/kueue/v1alpha1/generated_expansion.go +++ b/client-go/clientset/versioned/typed/kueue/v1alpha1/generated_expansion.go @@ -17,4 +17,6 @@ limitations under the License. package v1alpha1 +type MultiKueueClusterExpansion interface{} + type MultiKueueConfigExpansion interface{} diff --git a/client-go/clientset/versioned/typed/kueue/v1alpha1/kueue_client.go b/client-go/clientset/versioned/typed/kueue/v1alpha1/kueue_client.go index 3ea3317ec7..9f143a91e4 100644 --- a/client-go/clientset/versioned/typed/kueue/v1alpha1/kueue_client.go +++ b/client-go/clientset/versioned/typed/kueue/v1alpha1/kueue_client.go @@ -27,6 +27,7 @@ import ( type KueueV1alpha1Interface interface { RESTClient() rest.Interface + MultiKueueClustersGetter MultiKueueConfigsGetter } @@ -35,6 +36,10 @@ type KueueV1alpha1Client struct { restClient rest.Interface } +func (c *KueueV1alpha1Client) MultiKueueClusters() MultiKueueClusterInterface { + return newMultiKueueClusters(c) +} + func (c *KueueV1alpha1Client) MultiKueueConfigs() MultiKueueConfigInterface { return newMultiKueueConfigs(c) } diff --git a/client-go/clientset/versioned/typed/kueue/v1alpha1/multikueuecluster.go b/client-go/clientset/versioned/typed/kueue/v1alpha1/multikueuecluster.go new file mode 100644 index 0000000000..9851c39d41 --- /dev/null +++ b/client-go/clientset/versioned/typed/kueue/v1alpha1/multikueuecluster.go @@ -0,0 +1,242 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "sigs.k8s.io/kueue/apis/kueue/v1alpha1" + kueuev1alpha1 "sigs.k8s.io/kueue/client-go/applyconfiguration/kueue/v1alpha1" + scheme "sigs.k8s.io/kueue/client-go/clientset/versioned/scheme" +) + +// MultiKueueClustersGetter has a method to return a MultiKueueClusterInterface. +// A group's client should implement this interface. +type MultiKueueClustersGetter interface { + MultiKueueClusters() MultiKueueClusterInterface +} + +// MultiKueueClusterInterface has methods to work with MultiKueueCluster resources. +type MultiKueueClusterInterface interface { + Create(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.CreateOptions) (*v1alpha1.MultiKueueCluster, error) + Update(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.UpdateOptions) (*v1alpha1.MultiKueueCluster, error) + UpdateStatus(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.UpdateOptions) (*v1alpha1.MultiKueueCluster, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.MultiKueueCluster, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.MultiKueueClusterList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MultiKueueCluster, err error) + Apply(ctx context.Context, multiKueueCluster *kueuev1alpha1.MultiKueueClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MultiKueueCluster, err error) + ApplyStatus(ctx context.Context, multiKueueCluster *kueuev1alpha1.MultiKueueClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MultiKueueCluster, err error) + MultiKueueClusterExpansion +} + +// multiKueueClusters implements MultiKueueClusterInterface +type multiKueueClusters struct { + client rest.Interface +} + +// newMultiKueueClusters returns a MultiKueueClusters +func newMultiKueueClusters(c *KueueV1alpha1Client) *multiKueueClusters { + return &multiKueueClusters{ + client: c.RESTClient(), + } +} + +// Get takes name of the multiKueueCluster, and returns the corresponding multiKueueCluster object, and an error if there is any. +func (c *multiKueueClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.MultiKueueCluster, err error) { + result = &v1alpha1.MultiKueueCluster{} + err = c.client.Get(). + Resource("multikueueclusters"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MultiKueueClusters that match those selectors. +func (c *multiKueueClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.MultiKueueClusterList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.MultiKueueClusterList{} + err = c.client.Get(). + Resource("multikueueclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested multiKueueClusters. +func (c *multiKueueClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("multikueueclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a multiKueueCluster and creates it. Returns the server's representation of the multiKueueCluster, and an error, if there is any. +func (c *multiKueueClusters) Create(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.CreateOptions) (result *v1alpha1.MultiKueueCluster, err error) { + result = &v1alpha1.MultiKueueCluster{} + err = c.client.Post(). + Resource("multikueueclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(multiKueueCluster). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a multiKueueCluster and updates it. Returns the server's representation of the multiKueueCluster, and an error, if there is any. +func (c *multiKueueClusters) Update(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.UpdateOptions) (result *v1alpha1.MultiKueueCluster, err error) { + result = &v1alpha1.MultiKueueCluster{} + err = c.client.Put(). + Resource("multikueueclusters"). + Name(multiKueueCluster.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(multiKueueCluster). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *multiKueueClusters) UpdateStatus(ctx context.Context, multiKueueCluster *v1alpha1.MultiKueueCluster, opts v1.UpdateOptions) (result *v1alpha1.MultiKueueCluster, err error) { + result = &v1alpha1.MultiKueueCluster{} + err = c.client.Put(). + Resource("multikueueclusters"). + Name(multiKueueCluster.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(multiKueueCluster). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the multiKueueCluster and deletes it. Returns an error if one occurs. +func (c *multiKueueClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("multikueueclusters"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *multiKueueClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("multikueueclusters"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched multiKueueCluster. +func (c *multiKueueClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.MultiKueueCluster, err error) { + result = &v1alpha1.MultiKueueCluster{} + err = c.client.Patch(pt). + Resource("multikueueclusters"). + Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied multiKueueCluster. +func (c *multiKueueClusters) Apply(ctx context.Context, multiKueueCluster *kueuev1alpha1.MultiKueueClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MultiKueueCluster, err error) { + if multiKueueCluster == nil { + return nil, fmt.Errorf("multiKueueCluster provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(multiKueueCluster) + if err != nil { + return nil, err + } + name := multiKueueCluster.Name + if name == nil { + return nil, fmt.Errorf("multiKueueCluster.Name must be provided to Apply") + } + result = &v1alpha1.MultiKueueCluster{} + err = c.client.Patch(types.ApplyPatchType). + Resource("multikueueclusters"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *multiKueueClusters) ApplyStatus(ctx context.Context, multiKueueCluster *kueuev1alpha1.MultiKueueClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.MultiKueueCluster, err error) { + if multiKueueCluster == nil { + return nil, fmt.Errorf("multiKueueCluster provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(multiKueueCluster) + if err != nil { + return nil, err + } + + name := multiKueueCluster.Name + if name == nil { + return nil, fmt.Errorf("multiKueueCluster.Name must be provided to Apply") + } + + result = &v1alpha1.MultiKueueCluster{} + err = c.client.Patch(types.ApplyPatchType). + Resource("multikueueclusters"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/client-go/informers/externalversions/generic.go b/client-go/informers/externalversions/generic.go index d47a142815..20debdeb89 100644 --- a/client-go/informers/externalversions/generic.go +++ b/client-go/informers/externalversions/generic.go @@ -54,6 +54,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=kueue.x-k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("multikueueclusters"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kueue().V1alpha1().MultiKueueClusters().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("multikueueconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kueue().V1alpha1().MultiKueueConfigs().Informer()}, nil diff --git a/client-go/informers/externalversions/kueue/v1alpha1/interface.go b/client-go/informers/externalversions/kueue/v1alpha1/interface.go index d2ff8e10de..4ccfaffc78 100644 --- a/client-go/informers/externalversions/kueue/v1alpha1/interface.go +++ b/client-go/informers/externalversions/kueue/v1alpha1/interface.go @@ -23,6 +23,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // MultiKueueClusters returns a MultiKueueClusterInformer. 
+ MultiKueueClusters() MultiKueueClusterInformer // MultiKueueConfigs returns a MultiKueueConfigInformer. MultiKueueConfigs() MultiKueueConfigInformer } @@ -38,6 +40,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// MultiKueueClusters returns a MultiKueueClusterInformer. +func (v *version) MultiKueueClusters() MultiKueueClusterInformer { + return &multiKueueClusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // MultiKueueConfigs returns a MultiKueueConfigInformer. func (v *version) MultiKueueConfigs() MultiKueueConfigInformer { return &multiKueueConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/client-go/informers/externalversions/kueue/v1alpha1/multikueuecluster.go b/client-go/informers/externalversions/kueue/v1alpha1/multikueuecluster.go new file mode 100644 index 0000000000..13fd62b717 --- /dev/null +++ b/client-go/informers/externalversions/kueue/v1alpha1/multikueuecluster.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + kueuev1alpha1 "sigs.k8s.io/kueue/apis/kueue/v1alpha1" + versioned "sigs.k8s.io/kueue/client-go/clientset/versioned" + internalinterfaces "sigs.k8s.io/kueue/client-go/informers/externalversions/internalinterfaces" + v1alpha1 "sigs.k8s.io/kueue/client-go/listers/kueue/v1alpha1" +) + +// MultiKueueClusterInformer provides access to a shared informer and lister for +// MultiKueueClusters. +type MultiKueueClusterInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.MultiKueueClusterLister +} + +type multiKueueClusterInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMultiKueueClusterInformer constructs a new informer for MultiKueueCluster type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMultiKueueClusterInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMultiKueueClusterInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMultiKueueClusterInformer constructs a new informer for MultiKueueCluster type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMultiKueueClusterInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KueueV1alpha1().MultiKueueClusters().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KueueV1alpha1().MultiKueueClusters().Watch(context.TODO(), options) + }, + }, + &kueuev1alpha1.MultiKueueCluster{}, + resyncPeriod, + indexers, + ) +} + +func (f *multiKueueClusterInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMultiKueueClusterInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *multiKueueClusterInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kueuev1alpha1.MultiKueueCluster{}, f.defaultInformer) +} + +func (f *multiKueueClusterInformer) Lister() v1alpha1.MultiKueueClusterLister { + return v1alpha1.NewMultiKueueClusterLister(f.Informer().GetIndexer()) +} diff --git a/client-go/listers/kueue/v1alpha1/expansion_generated.go b/client-go/listers/kueue/v1alpha1/expansion_generated.go index 466635f88d..23fd7f72e5 100644 --- a/client-go/listers/kueue/v1alpha1/expansion_generated.go +++ b/client-go/listers/kueue/v1alpha1/expansion_generated.go @@ -17,6 +17,10 @@ limitations under the License. package v1alpha1 +// MultiKueueClusterListerExpansion allows custom methods to be added to +// MultiKueueClusterLister. +type MultiKueueClusterListerExpansion interface{} + // MultiKueueConfigListerExpansion allows custom methods to be added to // MultiKueueConfigLister. type MultiKueueConfigListerExpansion interface{} diff --git a/client-go/listers/kueue/v1alpha1/multikueuecluster.go b/client-go/listers/kueue/v1alpha1/multikueuecluster.go new file mode 100644 index 0000000000..1d129d4aa7 --- /dev/null +++ b/client-go/listers/kueue/v1alpha1/multikueuecluster.go @@ -0,0 +1,67 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "sigs.k8s.io/kueue/apis/kueue/v1alpha1" +) + +// MultiKueueClusterLister helps list MultiKueueClusters. +// All objects returned here must be treated as read-only. +type MultiKueueClusterLister interface { + // List lists all MultiKueueClusters in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha1.MultiKueueCluster, err error) + // Get retrieves the MultiKueueCluster from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.MultiKueueCluster, error) + MultiKueueClusterListerExpansion +} + +// multiKueueClusterLister implements the MultiKueueClusterLister interface. +type multiKueueClusterLister struct { + indexer cache.Indexer +} + +// NewMultiKueueClusterLister returns a new MultiKueueClusterLister. +func NewMultiKueueClusterLister(indexer cache.Indexer) MultiKueueClusterLister { + return &multiKueueClusterLister{indexer: indexer} +} + +// List lists all MultiKueueClusters in the indexer. +func (s *multiKueueClusterLister) List(selector labels.Selector) (ret []*v1alpha1.MultiKueueCluster, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.MultiKueueCluster)) + }) + return ret, err +} + +// Get retrieves the MultiKueueCluster from the index for a given name. +func (s *multiKueueClusterLister) Get(name string) (*v1alpha1.MultiKueueCluster, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("multikueuecluster"), name) + } + return obj.(*v1alpha1.MultiKueueCluster), nil +} diff --git a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml new file mode 100644 index 0000000000..3d15e011af --- /dev/null +++ b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml @@ -0,0 +1,133 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + name: multikueueclusters.kueue.x-k8s.io +spec: + group: kueue.x-k8s.io + names: + kind: MultiKueueCluster + listKind: MultiKueueClusterList + plural: multikueueclusters + singular: multikueuecluster + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MultiKueueCluster is the Schema for the multikueue API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + kubeconfigRef: + properties: + location: + description: Location of the KubeConfig. + type: string + locationType: + default: Secret + description: Type of the KubeConfig location. + enum: + - Secret + type: string + name: + description: Name of the cluster inside the given KubeConfig. + type: string + required: + - location + - locationType + - name + type: object + required: + - kubeconfigRef + type: object + status: + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. 
--- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml b/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml index e765baf5e1..62b641e14f 100644 --- a/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml +++ b/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml @@ -17,8 +17,7 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: MultiKueueConfig is the Schema for the provisioningrequestconfig - API + description: MultiKueueConfig is the Schema for the multikueue API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -36,41 +35,14 @@ spec: description: MultiKueueConfigSpec defines the desired state of MultiKueueConfig properties: clusters: - description: clusters contains the list of configurations for using - worker clusters. + description: List of MultiKueueClusters names where the workloads + from the ClusterQueue should be distributed. items: - properties: - kubeconfigRef: - properties: - location: - description: Location of the KubeConfig. - type: string - locationType: - default: Secret - description: Type of the KubeConfig location. - enum: - - Secret - type: string - name: - description: Name of the cluster inside the given KubeConfig. - type: string - required: - - location - - locationType - - name - type: object - name: - type: string - required: - - kubeconfigRef - - name - type: object + type: string maxItems: 100 minItems: 1 type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map + x-kubernetes-list-type: set required: - clusters type: object diff --git a/config/components/crd/kustomization.yaml b/config/components/crd/kustomization.yaml index 0696084df5..e2f85f1088 100644 --- a/config/components/crd/kustomization.yaml +++ b/config/components/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/kueue.x-k8s.io_workloadpriorityclasses.yaml - bases/kueue.x-k8s.io_provisioningrequestconfigs.yaml - bases/kueue.x-k8s.io_multikueueconfigs.yaml +- bases/kueue.x-k8s.io_multikueueclusters.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: From 3503c4a401de5c50d5bac730137e4a9d267384ca Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Thu, 18 Jan 2024 15:55:17 +0200 Subject: [PATCH 2/9] [multikueue] Make MultiKueueCluster an API object. 
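With this change MultiKueueCluster becomes a dedicated, cluster-scoped API
object: a MultiKueueConfig now only lists cluster names, while the kubeconfig
reference and the Active condition live on each MultiKueueCluster. A minimal
usage sketch of the split (illustrative only; the object and secret names
below are examples, not part of this patch):

    apiVersion: kueue.x-k8s.io/v1alpha1
    kind: MultiKueueCluster
    metadata:
      name: worker1
    spec:
      kubeconfigRef:
        # name of the cluster inside the kubeconfig
        name: worker1
        # secret in the manager's configuration namespace, key "kubeconfig"
        location: worker1-kubeconfig
        locationType: Secret
    ---
    apiVersion: kueue.x-k8s.io/v1alpha1
    kind: MultiKueueConfig
    metadata:
      name: multikueue-config
    spec:
      clusters:
      - worker1

An AdmissionCheck pointing to the MultiKueueConfig is reported Active only
when all the listed clusters exist and have their Active condition set to
True.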
--- charts/kueue/templates/rbac/role.yaml | 16 + config/components/rbac/role.yaml | 16 + .../multikueue/admissioncheck.go | 232 +++++------- .../multikueue/admissioncheck_test.go | 162 +-------- .../admissionchecks/multikueue/controllers.go | 11 +- .../admissionchecks/multikueue/indexer.go | 25 +- .../multikueue/indexer_test.go | 74 +--- .../multikueue/multikueuecluster.go | 339 ++++++++++++++++++ .../multikueue/multikueuecluster_test.go | 195 ++++++++++ .../multikueue/remoteclient.go | 173 --------- .../multikueue/remoteclient_test.go | 135 ------- .../admissionchecks/multikueue/workload.go | 71 ++-- .../multikueue/workload_test.go | 20 +- pkg/util/testing/wrappers.go | 62 ++++ test/e2e/multikueue/e2e_test.go | 44 +-- .../integration/multikueue/multikueue_test.go | 34 +- 16 files changed, 853 insertions(+), 756 deletions(-) create mode 100644 pkg/controller/admissionchecks/multikueue/multikueuecluster.go create mode 100644 pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go delete mode 100644 pkg/controller/admissionchecks/multikueue/remoteclient.go delete mode 100644 pkg/controller/admissionchecks/multikueue/remoteclient_test.go diff --git a/charts/kueue/templates/rbac/role.yaml b/charts/kueue/templates/rbac/role.yaml index 9dd9d9fb85..217053db34 100644 --- a/charts/kueue/templates/rbac/role.yaml +++ b/charts/kueue/templates/rbac/role.yaml @@ -400,6 +400,22 @@ rules: - get - patch - update + - apiGroups: + - kueue.x-k8s.io + resources: + - multikueueclusters + verbs: + - get + - list + - watch + - apiGroups: + - kueue.x-k8s.io + resources: + - multikueueclusters/status + verbs: + - get + - patch + - update - apiGroups: - kueue.x-k8s.io resources: diff --git a/config/components/rbac/role.yaml b/config/components/rbac/role.yaml index f80ccfad42..c18ab90063 100644 --- a/config/components/rbac/role.yaml +++ b/config/components/rbac/role.yaml @@ -401,6 +401,22 @@ rules: - get - patch - update +- apiGroups: + - kueue.x-k8s.io + resources: + - multikueueclusters + verbs: + - get + - list + - watch +- apiGroups: + - kueue.x-k8s.io + resources: + - multikueueclusters/status + verbs: + - get + - patch + - update - apiGroups: - kueue.x-k8s.io resources: diff --git a/pkg/controller/admissionchecks/multikueue/admissioncheck.go b/pkg/controller/admissionchecks/multikueue/admissioncheck.go index 86080899f4..9fc236b863 100644 --- a/pkg/controller/admissionchecks/multikueue/admissioncheck.go +++ b/pkg/controller/admissionchecks/multikueue/admissioncheck.go @@ -18,14 +18,10 @@ package multikueue import ( "context" - "errors" "fmt" "strings" - "sync" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -35,7 +31,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1" @@ -54,105 +49,59 @@ func newMultiKueueStoreHelper(c client.Client) (*multiKueueStoreHelper, error) { } // ACReconciler implements the reconciler for all the admission checks controlled by multikueue. -// Its main tasks being to: -// - Maintain the list of remote controllers associated to each admission checks. -// - Maintain the active state of the admission checks. 
+// Its main task is to maintain the active state of the admission checks based on the health
+// of its referenced MultiKueueClusters.
 type ACReconciler struct {
-	client          client.Client
-	helper          *multiKueueStoreHelper
-	configNamespace string
-
-	lock sync.RWMutex
-	// The list of remote controllers, indexed by the admission checks name.
-	controllers map[string]*remoteController
-	wlUpdateCh  chan event.GenericEvent
-
-	// rootContext - holds the context passed by the controller-runtime on Start.
-	// It's used to create child contexts for MultiKueueClusters client watch routines
-	// that will gracefully end when the controller-manager stops.
-	rootContext context.Context
-
-	// For unit testing only. There is now need of creating fully functional remote clients in the unit tests
-	// and creating valid kubeconfig content is not trivial.
-	// The full client creation and usage is validated in the integration and e2e tests.
-	builderOverride clientWithWatchBuilder
+	client client.Client
+	helper *multiKueueStoreHelper
 }
 
 var _ reconcile.Reconciler = (*ACReconciler)(nil)
-var _ manager.Runnable = (*ACReconciler)(nil)
-
-func (a *ACReconciler) controllerFor(acName string) (*remoteController, bool) {
-	a.lock.RLock()
-	defer a.lock.RUnlock()
-
-	c, f := a.controllers[acName]
-	return c, f
-}
-
-func (a *ACReconciler) setControllerFor(acName string, c *remoteController) {
-	a.lock.Lock()
-	defer a.lock.Unlock()
-
-	if old, found := a.controllers[acName]; found {
-		old.watchCancel()
-	}
-	if c != nil {
-		a.controllers[acName] = c
-		return
-	}
-	delete(a.controllers, acName)
-}
-
-func (a *ACReconciler) Start(ctx context.Context) error {
-	a.rootContext = ctx
-	return nil
-}
 
 func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
 	ac := &kueue.AdmissionCheck{}
 	if err := a.client.Get(ctx, req.NamespacedName, ac); err != nil || ac.Spec.ControllerName != ControllerName {
-		if apierrors.IsNotFound(err) || !ac.DeletionTimestamp.IsZero() {
-			// stop/deleted a potential check controller
-			if cc, existing := a.controllerFor(req.Name); existing {
-				cc.watchCancel()
-				a.setControllerFor(req.Name, nil)
-				log.V(2).Info("Controller removed")
-			}
-			return reconcile.Result{}, nil
-		}
-		return reconcile.Result{}, err
+		return reconcile.Result{}, client.IgnoreNotFound(err)
 	}
 
-	var remCtrl *remoteController
+	active := true
 	inactiveReason := ""
 
 	log.V(2).Info("Reconcile AdmissionCheck")
 	if cfg, err := a.helper.ConfigFromRef(ctx, ac.Spec.Parameters); err != nil {
+		active = false
 		inactiveReason = fmt.Sprintf("Cannot load the AdmissionChecks parameters: %s", err.Error())
 	} else {
-		kubeconfigs, err := a.getKubeConfigs(ctx, &cfg.Spec)
-		if err != nil {
-			a.setControllerFor(ac.Name, nil)
-			inactiveReason = fmt.Sprintf("Cannot load kubeconfigs: %s", err.Error())
-		} else {
-			remoteCtrl, existing := a.controllerFor(ac.Name)
-			if !existing {
-				remoteCtrl = newRemoteController(a.rootContext, a.client, a.wlUpdateCh)
-				if a.builderOverride != nil {
-					remoteCtrl.builderOverride = a.builderOverride
-				}
-				a.setControllerFor(ac.Name, remoteCtrl)
+		var missingClusters []string
+		var inactiveClusters []string
+		// check the status of the clusters
+		for _, clusterName := range cfg.Spec.Clusters {
+			cluster := &kueuealpha.MultiKueueCluster{}
+			err := a.client.Get(ctx, types.NamespacedName{Name: clusterName}, cluster)
+			if client.IgnoreNotFound(err) != nil {
+				return reconcile.Result{}, err
 			}
-			err := remoteCtrl.UpdateConfig(kubeconfigs)
 			if err != nil {
-				
inactiveReason = fmt.Sprintf("Cannot start MultiKueueClusters controller: %s", err.Error()) - a.setControllerFor(ac.Name, nil) + missingClusters = append(missingClusters, clusterName) } else { - remCtrl = remoteCtrl + if !apimeta.IsStatusConditionTrue(cluster.Status.Conditions, kueuealpha.MultiKueueClusterActive) { + inactiveClusters = append(inactiveClusters, clusterName) + } } } + + var messageParts []string + if len(missingClusters) > 0 { + active = false + messageParts = []string{fmt.Sprintf("Missing clusters: %v", missingClusters)} + } + if len(inactiveClusters) > 0 { + active = false + messageParts = append(messageParts, fmt.Sprintf("Inactive clusters: %v", inactiveClusters)) + } + inactiveReason = strings.Join(messageParts, ", ") } newCondition := metav1.Condition{ @@ -161,7 +110,7 @@ func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re Reason: "Active", Message: "The admission check is active", } - if remCtrl.IsActive() { + if active { newCondition.Status = metav1.ConditionTrue newCondition.Reason = "Active" newCondition.Message = "The admission check is active" @@ -184,67 +133,34 @@ func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re return reconcile.Result{}, nil } -func (cc *ACReconciler) getKubeConfigs(ctx context.Context, spec *kueuealpha.MultiKueueConfigSpec) (map[string][]byte, error) { - ret := make(map[string][]byte, len(spec.Clusters)) - for _, c := range spec.Clusters { - ref := c.KubeconfigRef - sec := corev1.Secret{} - - secretObjKey := types.NamespacedName{ - Namespace: cc.configNamespace, - Name: ref.Location, - } - err := cc.client.Get(ctx, secretObjKey, &sec) - if err != nil { - return nil, fmt.Errorf("getting kubeconfig secret for %q: %w", c.Name, err) - } - - kconfigBytes, found := sec.Data[kueuealpha.MultiKueueConfigSecretKey] - if !found { - return nil, fmt.Errorf("getting kubeconfig secret for %q: key %q not found in secret %q", c.Name, kueuealpha.MultiKueueConfigSecretKey, ref.Location) - } - ret[c.Name] = kconfigBytes - } - return ret, nil -} - // +kubebuilder:rbac:groups="",resources=events,verbs=create;watch;update // +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads,verbs=get;list;watch;update;patch;delete // +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=workloads/status,verbs=get;update;patch // +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=admissionchecks,verbs=get;list;watch // +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=multikueueconfigs,verbs=get;list;watch -func newACController(c client.Client, helper *multiKueueStoreHelper, namespace string) *ACReconciler { +func newACReconciler(c client.Client, helper *multiKueueStoreHelper) *ACReconciler { return &ACReconciler{ - client: c, - helper: helper, - configNamespace: namespace, - controllers: make(map[string]*remoteController), - wlUpdateCh: make(chan event.GenericEvent, 10), + client: c, + helper: helper, } - } func (a *ACReconciler) setupWithManager(mgr ctrl.Manager) error { - err := mgr.Add(a) - if err != nil { - return err - } - return ctrl.NewControllerManagedBy(mgr). For(&kueue.AdmissionCheck{}). - Watches(&kueuealpha.MultiKueueConfig{}, &mkcHandler{client: a.client}). - Watches(&corev1.Secret{}, &secretHandler{client: a.client}). + Watches(&kueuealpha.MultiKueueConfig{}, &mkConfigHandler{client: a.client}). + Watches(&kueuealpha.MultiKueueCluster{}, &mkClusterHandler{client: a.client}). 
 		Complete(a)
 }
 
-type mkcHandler struct {
+type mkConfigHandler struct {
 	client client.Client
 }
 
-var _ handler.EventHandler = (*mkcHandler)(nil)
+var _ handler.EventHandler = (*mkConfigHandler)(nil)
 
-func (m *mkcHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) {
+func (m *mkConfigHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) {
 	mkc, isMKC := event.Object.(*kueuealpha.MultiKueueConfig)
 	if !isMKC {
 		return
@@ -255,7 +171,7 @@ func (m *mkcHandler) Create(ctx context.Context, event event.CreateEvent, q work
 	}
 }
 
-func (m *mkcHandler) Update(ctx context.Context, event event.UpdateEvent, q workqueue.RateLimitingInterface) {
+func (m *mkConfigHandler) Update(ctx context.Context, event event.UpdateEvent, q workqueue.RateLimitingInterface) {
 	oldMKC, isOldMKC := event.ObjectOld.(*kueuealpha.MultiKueueConfig)
 	newMKC, isNewMKC := event.ObjectNew.(*kueuealpha.MultiKueueConfig)
 	if !isOldMKC || !isNewMKC || equality.Semantic.DeepEqual(oldMKC.Spec.Clusters, newMKC.Spec.Clusters) {
@@ -267,7 +183,7 @@ func (m *mkcHandler) Update(ctx context.Context, event event.UpdateEvent, q work
 	}
 }
 
-func (m *mkcHandler) Delete(ctx context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) {
+func (m *mkConfigHandler) Delete(ctx context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) {
 	mkc, isMKC := event.Object.(*kueuealpha.MultiKueueConfig)
 	if !isMKC {
 		return
@@ -278,7 +194,7 @@ func (m *mkcHandler) Delete(ctx context.Context, event event.DeleteEvent, q work
 	}
 }
 
-func (m *mkcHandler) Generic(ctx context.Context, event event.GenericEvent, q workqueue.RateLimitingInterface) {
+func (m *mkConfigHandler) Generic(ctx context.Context, event event.GenericEvent, q workqueue.RateLimitingInterface) {
 	mkc, isMKC := event.Object.(*kueuealpha.MultiKueueConfig)
 	if !isMKC {
 		return
@@ -308,49 +224,69 @@ func queueReconcileForConfigUsers(ctx context.Context, config string, c client.C
 	return nil
 }
 
-type secretHandler struct {
+type mkClusterHandler struct {
 	client client.Client
 }
 
-var _ handler.EventHandler = (*secretHandler)(nil)
+var _ handler.EventHandler = (*mkClusterHandler)(nil)
 
-func (s *secretHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) {
-	if err := s.queue(ctx, event.Object, q); err != nil {
-		ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on create event", "secret", klog.KObj(event.Object))
+func (m *mkClusterHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) {
+	mkc, isMKC := event.Object.(*kueuealpha.MultiKueueCluster)
+	if !isMKC {
+		return
 	}
-}
 
-func (s *secretHandler) Update(ctx context.Context, event event.UpdateEvent, q workqueue.RateLimitingInterface) {
-	if err := s.queue(ctx, event.ObjectOld, q); err != nil {
-		ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on update event", "secret", klog.KObj(event.ObjectOld))
+	if err := m.queue(ctx, mkc, q); err != nil {
+		ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on create event", "multiKueueCluster", klog.KObj(mkc))
 	}
 }
 
-func (s *secretHandler) Delete(ctx context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) {
-	if err := s.queue(ctx, event.Object, q); err != nil {
-		ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on delete event", "secret", klog.KObj(event.Object))
+func (m *mkClusterHandler) Update(ctx context.Context, event event.UpdateEvent, q workqueue.RateLimitingInterface) {
+	oldMKC,
isOldMKC := event.ObjectOld.(*kueuealpha.MultiKueueCluster) + newMKC, isNewMKC := event.ObjectNew.(*kueuealpha.MultiKueueCluster) + if !isOldMKC || !isNewMKC { + return + } + + oldActive := apimeta.IsStatusConditionTrue(oldMKC.Status.Conditions, kueuealpha.MultiKueueClusterActive) + newActive := apimeta.IsStatusConditionTrue(newMKC.Status.Conditions, kueuealpha.MultiKueueClusterActive) + if oldActive != newActive { + if err := m.queue(ctx, newMKC, q); err != nil { + ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on update event", "multiKueueCluster", klog.KObj(oldMKC)) + } } } -func (s *secretHandler) Generic(ctx context.Context, event event.GenericEvent, q workqueue.RateLimitingInterface) { - if err := s.queue(ctx, event.Object, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on generic event", "secret", klog.KObj(event.Object)) +func (m *mkClusterHandler) Delete(ctx context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) { + mkc, isMKC := event.Object.(*kueuealpha.MultiKueueCluster) + if !isMKC { + return + } + + if err := m.queue(ctx, mkc, q); err != nil { + ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on delete event", "multiKueueCluster", klog.KObj(mkc)) } } -func (s *secretHandler) queue(ctx context.Context, obj client.Object, q workqueue.RateLimitingInterface) error { - users := &kueuealpha.MultiKueueConfigList{} - secret, isSecret := obj.(*corev1.Secret) - if !isSecret { - return errors.New("not a secret") +func (m *mkClusterHandler) Generic(ctx context.Context, event event.GenericEvent, q workqueue.RateLimitingInterface) { + mkc, isMKC := event.Object.(*kueuealpha.MultiKueueCluster) + if !isMKC { + return } - if err := s.client.List(ctx, users, client.MatchingFields{UsingKubeConfigs: strings.Join([]string{secret.Namespace, secret.Name}, "/")}); err != nil { + if err := m.queue(ctx, mkc, q); err != nil { + ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on generic event", "multiKueueCluster", klog.KObj(mkc)) + } +} + +func (m *mkClusterHandler) queue(ctx context.Context, cluster *kueuealpha.MultiKueueCluster, q workqueue.RateLimitingInterface) error { + users := &kueuealpha.MultiKueueConfigList{} + if err := m.client.List(ctx, users, client.MatchingFields{UsingMultiKueueClusters: cluster.Name}); err != nil { return err } for _, user := range users.Items { - if err := queueReconcileForConfigUsers(ctx, user.Name, s.client, q); err != nil { + if err := queueReconcileForConfigUsers(ctx, user.Name, m.client, q); err != nil { return err } } diff --git a/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go b/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go index d92718f388..fe33b5b65e 100644 --- a/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go +++ b/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go @@ -21,7 +21,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -31,33 +30,19 @@ import ( utiltesting "sigs.k8s.io/kueue/pkg/util/testing" ) -func testRemoteController() *remoteController { - return &remoteController{ - watchCancel: func() {}, - } -} - func TestReconcile(t *testing.T) { cases := map[string]struct { checks []kueue.AdmissionCheck - controllers map[string]*remoteController + clusters []kueuealpha.MultiKueueCluster reconcileFor string configs []kueuealpha.MultiKueueConfig - secrets []corev1.Secret - 
wantChecks []kueue.AdmissionCheck - wantControllers map[string]*remoteController - wantError error + wantChecks []kueue.AdmissionCheck + wantError error }{ "missing admissioncheck": { reconcileFor: "missing-ac", }, - "removed admissioncheck": { - reconcileFor: "removed-ac", - controllers: map[string]*remoteController{ - "removed-ac": testRemoteController(), - }, - }, "missing config": { reconcileFor: "ac1", checks: []kueue.AdmissionCheck{ @@ -92,7 +77,7 @@ func TestReconcile(t *testing.T) { Obj(), }, }, - "missing kubeconfig secret": { + "missing cluster": { reconcileFor: "ac1", checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). @@ -101,20 +86,7 @@ func TestReconcile(t *testing.T) { Obj(), }, configs: []kueuealpha.MultiKueueConfig{ - { - ObjectMeta: metav1.ObjectMeta{Name: "config1"}, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - Name: "worker1", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret1", - LocationType: kueuealpha.SecretLocationType, - }, - }, - }, - }, - }, + *utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1").Obj(), }, wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). @@ -124,12 +96,12 @@ func TestReconcile(t *testing.T) { Type: kueue.AdmissionCheckActive, Status: metav1.ConditionFalse, Reason: "Inactive", - Message: `Cannot load kubeconfigs: getting kubeconfig secret for "worker1": secrets "secret1" not found`, + Message: "Missing clusters: [worker1]", }). Obj(), }, }, - "missing kubeconfig key in secret": { + "inactive cluster": { reconcileFor: "ac1", checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). @@ -138,70 +110,11 @@ func TestReconcile(t *testing.T) { Obj(), }, configs: []kueuealpha.MultiKueueConfig{ - { - ObjectMeta: metav1.ObjectMeta{Name: "config1"}, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - Name: "worker1", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret1", - LocationType: kueuealpha.SecretLocationType, - }, - }, - }, - }, - }, - }, - secrets: []corev1.Secret{ - { - ObjectMeta: metav1.ObjectMeta{Namespace: TestNamespace, Name: "secret1"}, - }, - }, - wantChecks: []kueue.AdmissionCheck{ - *utiltesting.MakeAdmissionCheck("ac1"). - ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). - Condition(metav1.Condition{ - Type: kueue.AdmissionCheckActive, - Status: metav1.ConditionFalse, - Reason: "Inactive", - Message: `Cannot load kubeconfigs: getting kubeconfig secret for "worker1": key "kubeconfig" not found in secret "secret1"`, - }). - Obj(), + *utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1").Obj(), }, - }, - "invalid kubeconfig in secret": { - reconcileFor: "ac1", - checks: []kueue.AdmissionCheck{ - *utiltesting.MakeAdmissionCheck("ac1"). - ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). 
- Obj(), - }, - configs: []kueuealpha.MultiKueueConfig{ - { - ObjectMeta: metav1.ObjectMeta{Name: "config1"}, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - Name: "worker1", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret1", - LocationType: kueuealpha.SecretLocationType, - }, - }, - }, - }, - }, - }, - secrets: []corev1.Secret{ - { - ObjectMeta: metav1.ObjectMeta{Namespace: TestNamespace, Name: "secret1"}, - Data: map[string][]byte{ - "kubeconfig": []byte("invalid"), - }, - }, + + clusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Active(metav1.ConditionFalse, "ByTest", "by test").Obj(), }, wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). @@ -211,12 +124,12 @@ func TestReconcile(t *testing.T) { Type: kueue.AdmissionCheckActive, Status: metav1.ConditionFalse, Reason: "Inactive", - Message: `Cannot start MultiKueueClusters controller: cluster "worker1": invalid kubeconfig`, + Message: "Inactive clusters: [worker1]", }). Obj(), }, }, - "valid": { + "active": { reconcileFor: "ac1", checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). @@ -225,28 +138,10 @@ func TestReconcile(t *testing.T) { Obj(), }, configs: []kueuealpha.MultiKueueConfig{ - { - ObjectMeta: metav1.ObjectMeta{Name: "config1"}, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - Name: "worker1", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret1", - LocationType: kueuealpha.SecretLocationType, - }, - }, - }, - }, - }, + *utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1").Obj(), }, - secrets: []corev1.Secret{ - { - ObjectMeta: metav1.ObjectMeta{Namespace: TestNamespace, Name: "secret1"}, - Data: map[string][]byte{ - "kubeconfig": []byte("good kubeconfig"), - }, - }, + clusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Active(metav1.ConditionTrue, "ByTest", "by test").Obj(), }, wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). @@ -260,15 +155,6 @@ func TestReconcile(t *testing.T) { }). 
Obj(), }, - wantControllers: map[string]*remoteController{ - "ac1": { - remoteClients: map[string]*remoteClient{ - "worker1": { - kubeconfig: []byte("good kubeconfig"), - }, - }, - }, - }, }, } @@ -279,7 +165,7 @@ func TestReconcile(t *testing.T) { builder = builder.WithLists( &kueue.AdmissionCheckList{Items: tc.checks}, &kueuealpha.MultiKueueConfigList{Items: tc.configs}, - &corev1.SecretList{Items: tc.secrets}, + &kueuealpha.MultiKueueClusterList{Items: tc.clusters}, ) for _, ac := range tc.checks { @@ -289,25 +175,13 @@ func TestReconcile(t *testing.T) { c := builder.Build() helper, _ := newMultiKueueStoreHelper(c) - reconciler := newACController(c, helper, TestNamespace) - if len(tc.controllers) > 0 { - reconciler.controllers = tc.controllers - } - reconciler.builderOverride = fakeClientBuilder - _ = reconciler.Start(ctx) + reconciler := newACReconciler(c, helper) _, gotErr := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: tc.reconcileFor}}) if diff := cmp.Diff(tc.wantError, gotErr); diff != "" { t.Errorf("unexpected error (-want/+got):\n%s", diff) } - if diff := cmp.Diff(tc.wantControllers, reconciler.controllers, cmpopts.EquateEmpty(), - cmp.AllowUnexported(remoteController{}), - cmpopts.IgnoreFields(remoteController{}, "localClient", "watchCancel", "watchCtx", "wlUpdateCh", "builderOverride"), - cmp.Comparer(func(a, b remoteClient) bool { return string(a.kubeconfig) == string(b.kubeconfig) })); diff != "" { - t.Errorf("unexpected controllers (-want/+got):\n%s", diff) - } - checks := &kueue.AdmissionCheckList{} listErr := c.List(ctx, checks) diff --git a/pkg/controller/admissionchecks/multikueue/controllers.go b/pkg/controller/admissionchecks/multikueue/controllers.go index 14c84f7d24..11009d6311 100644 --- a/pkg/controller/admissionchecks/multikueue/controllers.go +++ b/pkg/controller/admissionchecks/multikueue/controllers.go @@ -24,15 +24,18 @@ func SetupControllers(mgr ctrl.Manager, namespace string) error { return err } - a := newACController(mgr.GetClient(), helper, namespace) - err = a.setupWithManager(mgr) + cRec := newClustersReconciler(mgr.GetClient(), namespace) + err = cRec.setupWithManager(mgr) if err != nil { return err } - wlRec := &wlReconciler{ - acr: a, + acRec := newACReconciler(mgr.GetClient(), helper) + err = acRec.setupWithManager(mgr) + if err != nil { + return err } + wlRec := newWlReconciler(mgr.GetClient(), helper, cRec) return wlRec.setupWithManager(mgr) } diff --git a/pkg/controller/admissionchecks/multikueue/indexer.go b/pkg/controller/admissionchecks/multikueue/indexer.go index d689fbddc2..8cc43bd02a 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer.go +++ b/pkg/controller/admissionchecks/multikueue/indexer.go @@ -26,11 +26,11 @@ import ( kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1" kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" "sigs.k8s.io/kueue/pkg/util/admissioncheck" - "sigs.k8s.io/kueue/pkg/util/slices" ) const ( UsingKubeConfigs = "spec.kubeconfigs" + UsingMultiKueueClusters = "spec.multiKueueClusters" AdmissionCheckUsingConfigKey = "spec.multiKueueConfig" ) @@ -40,19 +40,28 @@ var ( func getIndexUsingKubeConfigs(configNamespace string) func(obj client.Object) []string { return func(obj client.Object) []string { - cfg, isCfg := obj.(*kueuealpha.MultiKueueConfig) - if !isCfg || len(cfg.Spec.Clusters) == 0 { + cluster, isCluster := obj.(*kueuealpha.MultiKueueCluster) + if !isCluster { return nil } - return slices.Map(cfg.Spec.Clusters, func(c *kueuealpha.MultiKueueCluster) string { - return 
strings.Join([]string{configNamespace, c.KubeconfigRef.Location}, "/") - }) + return []string{strings.Join([]string{configNamespace, cluster.Spec.KubeconfigRef.Location}, "/")} } } +func indexUsingMultiKueueClusters(obj client.Object) []string { + config, isConfig := obj.(*kueuealpha.MultiKueueConfig) + if !isConfig { + return nil + } + return config.Spec.Clusters +} + func SetupIndexer(ctx context.Context, indexer client.FieldIndexer, configNamespace string) error { - if err := indexer.IndexField(ctx, &kueuealpha.MultiKueueConfig{}, UsingKubeConfigs, getIndexUsingKubeConfigs(configNamespace)); err != nil { - return fmt.Errorf("setting index on checks config used kubeconfig: %w", err) + if err := indexer.IndexField(ctx, &kueuealpha.MultiKueueCluster{}, UsingKubeConfigs, getIndexUsingKubeConfigs(configNamespace)); err != nil { + return fmt.Errorf("setting index on clusters config used kubeconfig: %w", err) + } + if err := indexer.IndexField(ctx, &kueuealpha.MultiKueueConfig{}, UsingMultiKueueClusters, indexUsingMultiKueueClusters); err != nil { + return fmt.Errorf("setting index on checks config used clusters: %w", err) } if err := indexer.IndexField(ctx, &kueue.AdmissionCheck{}, AdmissionCheckUsingConfigKey, admissioncheck.IndexerByConfigFunction(ControllerName, configGVK)); err != nil { return fmt.Errorf("setting index on admission checks config: %w", err) diff --git a/pkg/controller/admissionchecks/multikueue/indexer_test.go b/pkg/controller/admissionchecks/multikueue/indexer_test.go index 40d509df71..99122b23e6 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer_test.go +++ b/pkg/controller/admissionchecks/multikueue/indexer_test.go @@ -67,13 +67,13 @@ func getClientBuilder() (*fake.ClientBuilder, context.Context) { func TestMultikueConfigUsingKubeconfig(t *testing.T) { cases := map[string]struct { - configs []*kueuealpha.MultiKueueConfig + configs []*kueuealpha.MultiKueueCluster filter client.ListOption wantListError error wantList []string }{ "no clusters": { - configs: []*kueuealpha.MultiKueueConfig{ + configs: []*kueuealpha.MultiKueueCluster{ { ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -84,71 +84,25 @@ func TestMultikueConfigUsingKubeconfig(t *testing.T) { filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, }, "single cluster, single match": { - configs: []*kueuealpha.MultiKueueConfig{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "config1", - }, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret1", - }, - }, - }, - }, - }, + configs: []*kueuealpha.MultiKueueCluster{ + utiltesting.MakeMultiKueueCluster("cluster1").Secret("", "secret1").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, - wantList: []string{"config1"}, + wantList: []string{"cluster1"}, }, "single cluster, no match": { - configs: []*kueuealpha.MultiKueueConfig{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "config1", - }, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret2", - }, - }, - }, - }, - }, + configs: []*kueuealpha.MultiKueueCluster{ + utiltesting.MakeMultiKueueCluster("cluster2").Secret("", "secret2").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, }, "multiple clusters, single match": { - configs: []*kueuealpha.MultiKueueConfig{ - { - 
ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "config1", - }, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret0", - }, - }, - { - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "secret1", - }, - }, - }, - }, - }, + configs: []*kueuealpha.MultiKueueCluster{ + utiltesting.MakeMultiKueueCluster("cluster1").Secret("", "secret1").Obj(), + utiltesting.MakeMultiKueueCluster("cluster2").Secret("", "secret2").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, - wantList: []string{"config1"}, + wantList: []string{"cluster1"}, }, } for name, tc := range cases { @@ -161,14 +115,14 @@ func TestMultikueConfigUsingKubeconfig(t *testing.T) { } } - lst := &kueuealpha.MultiKueueConfigList{} + lst := &kueuealpha.MultiKueueClusterList{} - gotListErr := k8sclient.List(ctx, lst, client.InNamespace("default"), tc.filter) + gotListErr := k8sclient.List(ctx, lst, tc.filter) if diff := cmp.Diff(tc.wantListError, gotListErr); diff != "" { t.Errorf("unexpected list error (-want/+got):\n%s", diff) } - gotList := slices.Map(lst.Items, func(mkc *kueuealpha.MultiKueueConfig) string { return mkc.Name }) + gotList := slices.Map(lst.Items, func(mkc *kueuealpha.MultiKueueCluster) string { return mkc.Name }) if diff := cmp.Diff(tc.wantList, gotList, cmpopts.EquateEmpty(), cmpopts.SortSlices(func(a, b string) bool { return a < b })); diff != "" { t.Errorf("unexpected list (-want/+got):\n%s", diff) } diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go new file mode 100644 index 0000000000..2116210520 --- /dev/null +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go @@ -0,0 +1,339 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package multikueue
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
+	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
+)
+
+type clientWithWatchBuilder func(config []byte, options client.Options) (client.WithWatch, error)
+
+type remoteClient struct {
+	localClient client.Client
+	client      client.WithWatch
+	wlUpdateCh  chan<- event.GenericEvent
+	watchCancel func()
+	kubeconfig  []byte
+
+	// For unit testing only. There is no need to create fully functional remote clients in the unit tests,
+	// and creating valid kubeconfig content is not trivial.
+	// The full client creation and usage is validated in the integration and e2e tests.
+	builderOverride clientWithWatchBuilder
+}
+
+func newRemoteClient(localClient client.Client, wlUpdateCh chan<- event.GenericEvent) *remoteClient {
+	rc := &remoteClient{
+		wlUpdateCh:  wlUpdateCh,
+		localClient: localClient,
+	}
+	return rc
+}
+
+func newClientWithWatch(kubeconfig []byte, options client.Options) (client.WithWatch, error) {
+	restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
+	if err != nil {
+		return nil, err
+	}
+	return client.NewWithWatch(restConfig, options)
+}
+
+// setConfig - will try to recreate the k8s client and restart watching if the new config is different from
+// the one currently used.
+func (rc *remoteClient) setConfig(watchCtx context.Context, kubeconfig []byte) error {
+	if equality.Semantic.DeepEqual(kubeconfig, rc.kubeconfig) {
+		return nil
+	}
+
+	if rc.watchCancel != nil {
+		rc.watchCancel()
+		rc.watchCancel = nil
+	}
+
+	builder := newClientWithWatch
+	if rc.builderOverride != nil {
+		builder = rc.builderOverride
+	}
+	remoteClient, err := builder(kubeconfig, client.Options{Scheme: rc.localClient.Scheme()})
+	if err != nil {
+		return err
+	}
+	rc.client = remoteClient
+
+	newWatcher, err := rc.client.Watch(watchCtx, &kueue.WorkloadList{})
+	if err != nil {
+		return err
+	}
+
+	go func() {
+		for r := range newWatcher.ResultChan() {
+			rc.queueWorkloadEvent(watchCtx, r)
+		}
+	}()
+
+	rc.watchCancel = newWatcher.Stop
+	rc.kubeconfig = kubeconfig
+	return nil
+}
+
+func (rc *remoteClient) queueWorkloadEvent(ctx context.Context, ev watch.Event) {
+	wl, isWl := ev.Object.(*kueue.Workload)
+	if !isWl {
+		return
+	}
+
+	localWl := &kueue.Workload{}
+	if err := rc.localClient.Get(ctx, client.ObjectKeyFromObject(wl), localWl); err == nil {
+		rc.wlUpdateCh <- event.GenericEvent{Object: localWl}
+	}
+}
+
+// clustersReconciler implements the reconciler for all MultiKueueClusters.
+// Its main task is to maintain the list of remote clients associated with each MultiKueueCluster.
+type clustersReconciler struct {
+	client          client.Client
+	configNamespace string
+
+	lock sync.RWMutex
+	// The list of remote clients, indexed by the cluster name.
+	clients    map[string]*remoteClient
+	wlUpdateCh chan event.GenericEvent
+
+	// rootContext - holds the context passed by the controller-runtime on Start.
+	// It's used to create child contexts for MultiKueueClusters client watch routines
+	// that will gracefully end when the controller-manager stops.
+	rootContext context.Context
+
+	// For unit testing only. There is no need to create fully functional remote clients in the unit tests,
+	// and creating valid kubeconfig content is not trivial.
+	// The full client creation and usage is validated in the integration and e2e tests.
+	builderOverride clientWithWatchBuilder
+}
+
+var _ manager.Runnable = (*clustersReconciler)(nil)
+var _ reconcile.Reconciler = (*clustersReconciler)(nil)
+
+func (c *clustersReconciler) Start(ctx context.Context) error {
+	c.rootContext = ctx
+	return nil
+}
+
+func (c *clustersReconciler) stopAndRemoveCluster(clusterName string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if rc, found := c.clients[clusterName]; found {
+		rc.watchCancel()
+		delete(c.clients, clusterName)
+	}
+}
+
+func (c *clustersReconciler) setRemoteClientConfig(clusterName string, kubeconfig []byte) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	client, found := c.clients[clusterName]
+	if !found {
+		client = newRemoteClient(c.client, c.wlUpdateCh)
+		if c.builderOverride != nil {
+			client.builderOverride = c.builderOverride
+		}
+		c.clients[clusterName] = client
+	}
+
+	if err := client.setConfig(c.rootContext, kubeconfig); err != nil {
+		delete(c.clients, clusterName)
+		return err
+	}
+	return nil
+}
+
+func (c *clustersReconciler) controllerFor(clusterName string) (*remoteClient, bool) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	rc, found := c.clients[clusterName]
+	return rc, found
+}
+
+func (c *clustersReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+	cluster := &kueuealpha.MultiKueueCluster{}
+
+	err := c.client.Get(ctx, req.NamespacedName, cluster)
+	if client.IgnoreNotFound(err) != nil {
+		return reconcile.Result{}, err
+	}
+
+	if err != nil || !cluster.DeletionTimestamp.IsZero() {
+		c.stopAndRemoveCluster(req.Name)
+		// notify the admission check reconciler
+		return reconcile.Result{}, nil
+	}
+
+	// get the kubeconfig
+	kubeConfig, retry, err := c.getKubeConfig(ctx, &cluster.Spec.KubeconfigRef)
+	if retry {
+		return reconcile.Result{}, err
+	}
+	if err != nil {
+		return reconcile.Result{}, c.updateStatus(ctx, cluster, false, "BadConfig", err.Error())
+	}
+
+	if err := c.setRemoteClientConfig(cluster.Name, kubeConfig); err != nil {
+		return reconcile.Result{}, c.updateStatus(ctx, cluster, false, "ClientConnectionFailed", err.Error())
+	}
+	return reconcile.Result{}, c.updateStatus(ctx, cluster, true, "Active", "Connected")
+}
+
+func (c *clustersReconciler) getKubeConfig(ctx context.Context, ref *kueuealpha.KubeconfigRef) ([]byte, bool, error) {
+	sec := corev1.Secret{}
+	secretObjKey := types.NamespacedName{
+		Namespace: c.configNamespace,
+		Name:      ref.Location,
+	}
+	err := c.client.Get(ctx, secretObjKey, &sec)
+	if err != nil {
+		return nil, !apierrors.IsNotFound(err), err
+	}
+
+	kconfigBytes, found := sec.Data[kueuealpha.MultiKueueConfigSecretKey]
+	if !found {
+		return nil, false, fmt.Errorf("key %q not found in secret %q", kueuealpha.MultiKueueConfigSecretKey, ref.Location)
+	}
+
+	return kconfigBytes, false, nil
+}
+
+func (c *clustersReconciler) updateStatus(ctx context.Context, cluster *kueuealpha.MultiKueueCluster, active bool, reason, message string) error {
+	newCondition := metav1.Condition{
+		Type:
kueuealpha.MultiKueueClusterActive, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + if active { + newCondition.Status = metav1.ConditionTrue + } + + // if the cluster is connected and remains that way, skip the status update + oldCondition := apimeta.FindStatusCondition(cluster.Status.Conditions, kueuealpha.MultiKueueClusterActive) + if active && oldCondition != nil && oldCondition.Status == metav1.ConditionTrue { + return nil + } + + apimeta.SetStatusCondition(&cluster.Status.Conditions, newCondition) + return c.client.Status().Update(ctx, cluster) +} + +// +kubebuilder:rbac:groups="",resources=events,verbs=create;watch;update +// +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=multikueueclusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=kueue.x-k8s.io,resources=multikueueclusters/status,verbs=get;update;patch + +func newClustersReconciler(c client.Client, namespace string) *clustersReconciler { + return &clustersReconciler{ + client: c, + configNamespace: namespace, + clients: make(map[string]*remoteClient), + wlUpdateCh: make(chan event.GenericEvent, 10), + } +} + +func (c *clustersReconciler) setupWithManager(mgr ctrl.Manager) error { + err := mgr.Add(c) + if err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&kueuealpha.MultiKueueCluster{}). + Watches(&corev1.Secret{}, &secretHandler{client: c.client}). + Complete(c) +} + +type secretHandler struct { + client client.Client +} + +var _ handler.EventHandler = (*secretHandler)(nil) + +func (s *secretHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) { + if err := s.queue(ctx, event.Object, q); err != nil { + ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on create event", "secret", klog.KObj(event.Object)) + } +} + +func (s *secretHandler) Update(ctx context.Context, event event.UpdateEvent, q workqueue.RateLimitingInterface) { + if err := s.queue(ctx, event.ObjectOld, q); err != nil { + ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on update event", "secret", klog.KObj(event.ObjectOld)) + } +} + +func (s *secretHandler) Delete(ctx context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) { + if err := s.queue(ctx, event.Object, q); err != nil { + ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on delete event", "secret", klog.KObj(event.Object)) + } +} + +func (s *secretHandler) Generic(ctx context.Context, event event.GenericEvent, q workqueue.RateLimitingInterface) { + if err := s.queue(ctx, event.Object, q); err != nil { + ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on generic event", "secret", klog.KObj(event.Object)) + } +} + +func (s *secretHandler) queue(ctx context.Context, obj client.Object, q workqueue.RateLimitingInterface) error { + users := &kueuealpha.MultiKueueClusterList{} + secret, isSecret := obj.(*corev1.Secret) + if !isSecret { + return errors.New("not a secret") + } + + if err := s.client.List(ctx, users, client.MatchingFields{UsingKubeConfigs: strings.Join([]string{secret.Namespace, secret.Name}, "/")}); err != nil { + return err + } + + for _, user := range users.Items { + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: user.Name, + }, + } + q.Add(req) + } + return nil +} diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go new file mode 100644 index 0000000000..08130316f8 --- /dev/null +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go @@ 
-0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multikueue + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1" + "sigs.k8s.io/kueue/pkg/util/slices" + utiltesting "sigs.k8s.io/kueue/pkg/util/testing" +) + +var ( + errInvalidConfig = errors.New("invalid kubeconfig") +) + +func fakeClientBuilder(kubeconfig []byte, options client.Options) (client.WithWatch, error) { + if string(kubeconfig) == "invalid" { + return nil, errInvalidConfig + } + b, _ := getClientBuilder() + return b.Build(), nil +} + +func newTestClient(config string) *remoteClient { + b, _ := getClientBuilder() + localClient := b.Build() + ret := &remoteClient{ + kubeconfig: []byte(config), + localClient: localClient, + + builderOverride: fakeClientBuilder, + } + ret.watchCancel = func() { + ret.kubeconfig = []byte(string(ret.kubeconfig) + " canceled") + } + return ret +} + +func makeTestSecret(name string, kubeconfig string) corev1.Secret { + return corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: TestNamespace, + }, + Data: map[string][]byte{ + kueuealpha.MultiKueueConfigSecretKey: []byte(kubeconfig), + }, + } +} + +func TestUpdateConfig(t *testing.T) { + cases := map[string]struct { + reconcileFor string + remoteClients map[string]*remoteClient + clusters []kueuealpha.MultiKueueCluster + secrets []corev1.Secret + + wantRemoteClients map[string]*remoteClient + wantClusters []kueuealpha.MultiKueueCluster + }{ + "new valid client is added": { + reconcileFor: "worker1", + clusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + }, + secrets: []corev1.Secret{ + makeTestSecret("worker1", "worker1 kubeconfig"), + }, + wantClusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), + }, + wantRemoteClients: map[string]*remoteClient{ + "worker1": { + kubeconfig: []byte("worker1 kubeconfig"), + }, + }, + }, + "update client with valid config": { + reconcileFor: "worker1", + clusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + }, + secrets: []corev1.Secret{ + makeTestSecret("worker1", "worker1 kubeconfig"), + }, + remoteClients: map[string]*remoteClient{ + "worker1": newTestClient("worker1 old kubeconfig"), + }, + wantClusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), + }, + wantRemoteClients: map[string]*remoteClient{ + "worker1": { + kubeconfig: []byte("worker1 kubeconfig"), + }, + 
}, + }, + "update client with invalid config": { + reconcileFor: "worker1", + clusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + }, + secrets: []corev1.Secret{ + makeTestSecret("worker1", "invalid"), + }, + remoteClients: map[string]*remoteClient{ + "worker1": newTestClient("worker1 old kubeconfig"), + }, + wantClusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Active(metav1.ConditionFalse, "ClientConnectionFailed", "invalid kubeconfig").Obj(), + }, + }, + "missing cluster is removed": { + reconcileFor: "worker2", + clusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + }, + remoteClients: map[string]*remoteClient{ + "worker1": newTestClient("worker1 kubeconfig"), + "worker2": newTestClient("worker2 kubeconfig"), + }, + wantClusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + }, + wantRemoteClients: map[string]*remoteClient{ + "worker1": { + kubeconfig: []byte("worker1 kubeconfig"), + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder, ctx := getClientBuilder() + builder = builder.WithLists(&kueuealpha.MultiKueueClusterList{Items: tc.clusters}) + builder = builder.WithLists(&corev1.SecretList{Items: tc.secrets}) + builder = builder.WithStatusSubresource(slices.Map(tc.clusters, func(c *kueuealpha.MultiKueueCluster) client.Object { return c })...) + c := builder.Build() + + reconciler := newClustersReconciler(c, TestNamespace) + + if len(tc.remoteClients) > 0 { + reconciler.clients = tc.remoteClients + } + reconciler.builderOverride = fakeClientBuilder + + _, gotErr := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: tc.reconcileFor}}) + if gotErr != nil { + t.Errorf("unexpected reconcile error: %s", gotErr) + } + + lst := &kueuealpha.MultiKueueClusterList{} + gotErr = c.List(ctx, lst) + if gotErr != nil { + t.Errorf("unexpected list clusters error: %s", gotErr) + } + + if diff := cmp.Diff(tc.wantClusters, lst.Items, cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion"), + cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")); diff != "" { + t.Errorf("unexpected clusters (-want/+got):\n%s", diff) + } + + if diff := cmp.Diff(tc.wantRemoteClients, reconciler.clients, cmpopts.EquateEmpty(), + cmp.Comparer(func(a, b remoteClient) bool { return string(a.kubeconfig) == string(b.kubeconfig) })); diff != "" { + t.Errorf("unexpected controllers (-want/+got):\n%s", diff) + } + }) + } +} diff --git a/pkg/controller/admissionchecks/multikueue/remoteclient.go b/pkg/controller/admissionchecks/multikueue/remoteclient.go deleted file mode 100644 index 696bc26f2e..0000000000 --- a/pkg/controller/admissionchecks/multikueue/remoteclient.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package multikueue - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - - kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" -) - -type clientWithWatchBuilder func(config []byte, options client.Options) (client.WithWatch, error) - -type remoteController struct { - localClient client.Client - watchCtx context.Context - watchCancel context.CancelFunc - remoteClients map[string]*remoteClient - wlUpdateCh chan<- event.GenericEvent - - // For unit testing only. There is now need of creating fully functional remote clients in the unit tests - // and creating valid kubeconfig content is not trivial. - // The full client creation and usage is validated in the integration and e2e tests. - builderOverride clientWithWatchBuilder -} - -func newRemoteController(watchCtx context.Context, localClient client.Client, wlUpdateCh chan<- event.GenericEvent) *remoteController { - watchCtx, watchCancel := context.WithCancel(watchCtx) - ret := &remoteController{ - localClient: localClient, - watchCtx: watchCtx, - watchCancel: watchCancel, - wlUpdateCh: wlUpdateCh, - } - return ret -} - -func (rc *remoteController) UpdateConfig(kubeConfigs map[string][]byte) error { - if rc.remoteClients == nil { - rc.remoteClients = make(map[string]*remoteClient, len(kubeConfigs)) - } - - for clusterName, c := range rc.remoteClients { - if kubeconfig, found := kubeConfigs[clusterName]; found { - if err := c.setConfig(rc.watchCtx, kubeconfig); err != nil { - delete(rc.remoteClients, clusterName) - return fmt.Errorf("cluster %q: %w", clusterName, err) - } - } else { - c.watchCancel() - delete(rc.remoteClients, clusterName) - } - } - // create the missing ones - for clusterName, kubeconfig := range kubeConfigs { - if _, found := rc.remoteClients[clusterName]; !found { - c := newRemoteClient(rc.localClient, rc.wlUpdateCh) - - c.builderOverride = rc.builderOverride - - if err := c.setConfig(rc.watchCtx, kubeconfig); err != nil { - return fmt.Errorf("cluster %q: %w", clusterName, err) - } - rc.remoteClients[clusterName] = c - } - } - - return nil -} - -func (cc *remoteController) IsActive() bool { - return cc != nil && len(cc.remoteClients) > 0 -} - -type remoteClient struct { - localClient client.Client - client client.WithWatch - wlUpdateCh chan<- event.GenericEvent - watchCancel func() - kubeconfig []byte - - // For unit testing only. There is now need of creating fully functional remote clients in the unit tests - // and creating valid kubeconfig content is not trivial. - // The full client creation and usage is validated in the integration and e2e tests. - builderOverride clientWithWatchBuilder -} - -func newRemoteClient(localClient client.Client, wlUpdateCh chan<- event.GenericEvent) *remoteClient { - rc := &remoteClient{ - wlUpdateCh: wlUpdateCh, - localClient: localClient, - } - - return rc -} - -func newClientWithWatch(kubeconfig []byte, options client.Options) (client.WithWatch, error) { - restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig) - if err != nil { - return nil, err - } - return client.NewWithWatch(restConfig, options) -} - -// setConfig - will try to recreate the k8s client and restart watching if the new config is different than -// the one currently used. 
-func (rc *remoteClient) setConfig(watchCtx context.Context, kubeconfig []byte) error { - if equality.Semantic.DeepEqual(kubeconfig, rc.kubeconfig) { - return nil - } - - if rc.watchCancel != nil { - rc.watchCancel() - rc.watchCancel = nil - } - - builder := newClientWithWatch - if rc.builderOverride != nil { - builder = rc.builderOverride - } - remoteClient, err := builder(kubeconfig, client.Options{Scheme: rc.localClient.Scheme()}) - if err != nil { - return err - } - rc.client = remoteClient - - newWatcher, err := rc.client.Watch(watchCtx, &kueue.WorkloadList{}) - if err != nil { - return nil - } - - go func() { - for r := range newWatcher.ResultChan() { - rc.queueWorkloadEvent(watchCtx, r) - } - }() - - rc.watchCancel = newWatcher.Stop - rc.kubeconfig = kubeconfig - return nil -} - -func (rc *remoteClient) queueWorkloadEvent(ctx context.Context, ev watch.Event) { - wl, isWl := ev.Object.(*kueue.Workload) - if !isWl { - return - } - - localWl := &kueue.Workload{} - if err := rc.localClient.Get(ctx, client.ObjectKeyFromObject(wl), localWl); err == nil { - rc.wlUpdateCh <- event.GenericEvent{Object: localWl} - } -} diff --git a/pkg/controller/admissionchecks/multikueue/remoteclient_test.go b/pkg/controller/admissionchecks/multikueue/remoteclient_test.go deleted file mode 100644 index 4d888d705b..0000000000 --- a/pkg/controller/admissionchecks/multikueue/remoteclient_test.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package multikueue - -import ( - "errors" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var ( - errInvalidConfig = errors.New("invalid kubeconfig") -) - -func fakeClientBuilder(kubeconfig []byte, options client.Options) (client.WithWatch, error) { - if string(kubeconfig) == "invalid" { - return nil, errInvalidConfig - } - b, _ := getClientBuilder() - return b.Build(), nil -} - -func newTestClient(config string) *remoteClient { - b, _ := getClientBuilder() - localClient := b.Build() - ret := &remoteClient{ - kubeconfig: []byte(config), - localClient: localClient, - - builderOverride: fakeClientBuilder, - } - ret.watchCancel = func() { - ret.kubeconfig = []byte(string(ret.kubeconfig) + " canceled") - } - return ret -} - -func TestUpdateConfig(t *testing.T) { - cases := map[string]struct { - kubeconfigs map[string][]byte - remoteClients map[string]*remoteClient - - wantRemoteClients map[string]*remoteClient - wantError error - }{ - "new valid client is added": { - kubeconfigs: map[string][]byte{ - "worker1": []byte("worker1 kubeconfig"), - }, - wantRemoteClients: map[string]*remoteClient{ - "worker1": { - kubeconfig: []byte("worker1 kubeconfig"), - }, - }, - }, - "update client with valid config": { - remoteClients: map[string]*remoteClient{ - "worker1": newTestClient("worker1 old kubeconfig"), - }, - kubeconfigs: map[string][]byte{ - "worker1": []byte("worker1 kubeconfig"), - }, - wantRemoteClients: map[string]*remoteClient{ - "worker1": { - kubeconfig: []byte("worker1 kubeconfig"), - }, - }, - }, - "update client with invalid config": { - remoteClients: map[string]*remoteClient{ - "worker1": newTestClient("worker1 old kubeconfig"), - }, - kubeconfigs: map[string][]byte{ - "worker1": []byte("invalid"), - }, - wantError: errInvalidConfig, - }, - "missing cluster is removed": { - remoteClients: map[string]*remoteClient{ - "worker1": newTestClient("worker1 kubeconfig"), - "worker2": newTestClient("worker2 kubeconfig"), - }, - kubeconfigs: map[string][]byte{ - "worker1": []byte("worker1 kubeconfig"), - }, - wantRemoteClients: map[string]*remoteClient{ - "worker1": { - kubeconfig: []byte("worker1 kubeconfig"), - }, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - builder, ctx := getClientBuilder() - c := builder.Build() - - remCtrl := newRemoteController(ctx, c, nil) - if len(tc.remoteClients) > 0 { - remCtrl.remoteClients = tc.remoteClients - } - - remCtrl.builderOverride = fakeClientBuilder - - gotErr := remCtrl.UpdateConfig(tc.kubeconfigs) - if diff := cmp.Diff(tc.wantError, gotErr, cmpopts.EquateErrors()); diff != "" { - t.Errorf("unexpected error (-want/+got):\n%s", diff) - } - - if diff := cmp.Diff(tc.wantRemoteClients, remCtrl.remoteClients, cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(remoteController{}, "localClient", "watchCancel", "watchCtx", "wlUpdateCh"), - cmp.Comparer(func(a, b remoteClient) bool { return string(a.kubeconfig) == string(b.kubeconfig) })); diff != "" { - t.Errorf("unexpected controllers (-want/+got):\n%s", diff) - } - }) - } -} diff --git a/pkg/controller/admissionchecks/multikueue/workload.go b/pkg/controller/admissionchecks/multikueue/workload.go index 3f10e291a6..0d10fbf44b 100644 --- a/pkg/controller/admissionchecks/multikueue/workload.go +++ b/pkg/controller/admissionchecks/multikueue/workload.go @@ -48,10 +48,14 @@ var ( batchv1.SchemeGroupVersion.WithKind("Job").String(): &batchJobAdapter{}, 
jobset.SchemeGroupVersion.WithKind("JobSet").String(): &jobsetAdapter{}, } + + errNoActiveClusters = errors.New("no active clusters") ) type wlReconciler struct { - acr *ACReconciler + client client.Client + helper *multiKueueStoreHelper + clusters *clustersReconciler } var _ reconcile.Reconciler = (*wlReconciler)(nil) @@ -72,7 +76,7 @@ type jobAdapter interface { type wlGroup struct { local *kueue.Workload remotes map[string]*kueue.Workload - rc *remoteController + remoteClients map[string]*remoteClient acName string jobAdapter jobAdapter controllerKey types.NamespacedName @@ -123,17 +127,17 @@ func (g *wlGroup) RemoveRemoteObjects(ctx context.Context, cluster string) error if remWl == nil { return nil } - if err := g.jobAdapter.DeleteRemoteObject(ctx, g.rc.remoteClients[cluster].client, g.controllerKey); err != nil { + if err := g.jobAdapter.DeleteRemoteObject(ctx, g.remoteClients[cluster].client, g.controllerKey); err != nil { return fmt.Errorf("deleting remote controller object: %w", err) } if controllerutil.RemoveFinalizer(remWl, kueue.ResourceInUseFinalizerName) { - if err := g.rc.remoteClients[cluster].client.Update(ctx, remWl); err != nil { + if err := g.remoteClients[cluster].client.Update(ctx, remWl); err != nil { return fmt.Errorf("removing remote workloads finalizeer: %w", err) } } - err := g.rc.remoteClients[cluster].client.Delete(ctx, remWl) + err := g.remoteClients[cluster].client.Delete(ctx, remWl) if client.IgnoreNotFound(err) != nil { return fmt.Errorf("deleting remote workload: %w", err) } @@ -145,7 +149,7 @@ func (a *wlReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re log := ctrl.LoggerFrom(ctx) log.V(2).Info("Reconcile Workload") wl := &kueue.Workload{} - if err := a.acr.client.Get(ctx, req.NamespacedName, wl); err != nil { + if err := a.client.Get(ctx, req.NamespacedName, wl); err != nil { return reconcile.Result{}, client.IgnoreNotFound(err) } // NOTE: the not found needs to be treated and should result in the deletion of all the remote workloads. 
@@ -166,8 +170,25 @@ func (a *wlReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re return reconcile.Result{}, a.reconcileGroup(ctx, grp) } +func (w *wlReconciler) remoteClientsForAC(ctx context.Context, acName string) (map[string]*remoteClient, error) { + cfg, err := w.helper.ConfigForAdmissionCheck(ctx, acName) + if err != nil { + return nil, err + } + clients := make(map[string]*remoteClient, len(cfg.Spec.Clusters)) + for _, clusterName := range cfg.Spec.Clusters { + if client, found := w.clusters.controllerFor(clusterName); found { + clients[clusterName] = client + } + } + if len(clients) == 0 { + return nil, errNoActiveClusters + } + return clients, nil +} + func (a *wlReconciler) readGroup(ctx context.Context, local *kueue.Workload) (*wlGroup, error) { - relevantChecks, err := admissioncheck.FilterForController(ctx, a.acr.client, local.Status.AdmissionChecks, ControllerName) + relevantChecks, err := admissioncheck.FilterForController(ctx, a.client, local.Status.AdmissionChecks, ControllerName) if err != nil { return nil, err } @@ -177,13 +198,9 @@ func (a *wlReconciler) readGroup(ctx context.Context, local *kueue.Workload) (*w return nil, nil } - rController, found := a.acr.controllerFor(relevantChecks[0]) - if !found { - return nil, errors.New("remote controller not found") - } - - if !rController.IsActive() { - return nil, errors.New("remote controller is not active") + rClients, err := a.remoteClientsForAC(ctx, relevantChecks[0]) + if err != nil { + return nil, fmt.Errorf("admission check %q: %w", relevantChecks[0], err) } // Lookup the adapter. @@ -202,14 +219,14 @@ func (a *wlReconciler) readGroup(ctx context.Context, local *kueue.Workload) (*w grp := wlGroup{ local: local, - remotes: make(map[string]*kueue.Workload, len(rController.remoteClients)), - rc: rController, + remotes: make(map[string]*kueue.Workload, len(rClients)), + remoteClients: rClients, acName: relevantChecks[0], jobAdapter: adapter, controllerKey: controllerKey, } - for remote, rClient := range rController.remoteClients { + for remote, rClient := range rClients { wl := &kueue.Workload{} err := rClient.client.Get(ctx, client.ObjectKeyFromObject(local), wl) if client.IgnoreNotFound(err) != nil { @@ -244,7 +261,7 @@ func (a *wlReconciler) reconcileGroup(ctx context.Context, group *wlGroup) error // it should not be problematic but the "From remote xxxx:" could be lost .... if group.jobAdapter != nil { - if err := group.jobAdapter.CopyStatusRemoteObject(ctx, a.acr.client, group.rc.remoteClients[remote].client, group.controllerKey); err != nil { + if err := group.jobAdapter.CopyStatusRemoteObject(ctx, a.client, group.remoteClients[remote].client, group.controllerKey); err != nil { log.V(2).Error(err, "copying remote controller status", "workerCluster", remote) // we should retry this return err @@ -261,7 +278,7 @@ func (a *wlReconciler) reconcileGroup(ctx context.Context, group *wlGroup) error Reason: remoteFinishedCond.Reason, Message: fmt.Sprintf("From remote %q: %s", remote, remoteFinishedCond.Message), }) - return a.acr.client.Status().Patch(ctx, wlPatch, client.Apply, client.FieldOwner(ControllerName+"-finish"), client.ForceOwnership) + return a.client.Status().Patch(ctx, wlPatch, client.Apply, client.FieldOwner(ControllerName+"-finish"), client.ForceOwnership) } hasReserving, reservingRemote := group.FirstReserving() @@ -284,7 +301,7 @@ func (a *wlReconciler) reconcileGroup(ctx context.Context, group *wlGroup) error // 3. 
get the first reserving if hasReserving { acs := workload.FindAdmissionCheck(group.local.Status.AdmissionChecks, group.acName) - if err := group.jobAdapter.CreateRemoteObject(ctx, a.acr.client, group.rc.remoteClients[reservingRemote].client, group.controllerKey, group.local.Name); err != nil { + if err := group.jobAdapter.CreateRemoteObject(ctx, a.client, group.remoteClients[reservingRemote].client, group.controllerKey, group.local.Name); err != nil { log.V(2).Error(err, "creating remote controller object", "remote", reservingRemote) // We'll retry this in the next reconcile. return err @@ -300,7 +317,7 @@ func (a *wlReconciler) reconcileGroup(ctx context.Context, group *wlGroup) error acs.Message = fmt.Sprintf("The workload got reservation on %q", reservingRemote) wlPatch := workload.BaseSSAWorkload(group.local) workload.SetAdmissionCheckState(&wlPatch.Status.AdmissionChecks, *acs) - err := a.acr.client.Status().Patch(ctx, wlPatch, client.Apply, client.FieldOwner(ControllerName), client.ForceOwnership) + err := a.client.Status().Patch(ctx, wlPatch, client.Apply, client.FieldOwner(ControllerName), client.ForceOwnership) if err != nil { return err } @@ -313,7 +330,7 @@ func (a *wlReconciler) reconcileGroup(ctx context.Context, group *wlGroup) error for rem, remWl := range group.remotes { if remWl == nil { clone := cloneForCreate(group.local) - err := group.rc.remoteClients[rem].client.Create(ctx, clone) + err := group.remoteClients[rem].client.Create(ctx, clone) if err != nil { // just log the error for a single remote log.V(2).Error(err, "creating remote object", "remote", rem) @@ -323,6 +340,14 @@ func (a *wlReconciler) reconcileGroup(ctx context.Context, group *wlGroup) error return nil } +func newWlReconciler(c client.Client, helper *multiKueueStoreHelper, cRec *clustersReconciler) *wlReconciler { + return &wlReconciler{ + client: c, + helper: helper, + clusters: cRec, + } +} + func (w *wlReconciler) setupWithManager(mgr ctrl.Manager) error { syncHndl := handler.Funcs{ GenericFunc: func(_ context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { @@ -335,7 +360,7 @@ func (w *wlReconciler) setupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&kueue.Workload{}). - WatchesRawSource(&source.Channel{Source: w.acr.wlUpdateCh}, syncHndl). + WatchesRawSource(&source.Channel{Source: w.clusters.wlUpdateCh}, syncHndl). Complete(w) } diff --git a/pkg/controller/admissionchecks/multikueue/workload_test.go b/pkg/controller/admissionchecks/multikueue/workload_test.go index e9d1784beb..9c9829a1dd 100644 --- a/pkg/controller/admissionchecks/multikueue/workload_test.go +++ b/pkg/controller/admissionchecks/multikueue/workload_test.go @@ -291,12 +291,16 @@ func TestWlReconcile(t *testing.T) { manageBuilder = manageBuilder.WithLists(&kueue.WorkloadList{Items: tc.managersWorkloads}, &batchv1.JobList{Items: tc.managersJobs}) manageBuilder = manageBuilder.WithStatusSubresource(slices.Map(tc.managersWorkloads, func(w *kueue.Workload) client.Object { return w })...) manageBuilder = manageBuilder.WithStatusSubresource(slices.Map(tc.managersJobs, func(w *batchv1.Job) client.Object { return w })...) - manageBuilder = manageBuilder.WithObjects(utiltesting.MakeAdmissionCheck("ac1").ControllerName(ControllerName).Obj()) + manageBuilder = manageBuilder.WithObjects( + utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1").Obj(), + utiltesting.MakeAdmissionCheck("ac1").ControllerName(ControllerName). 
+ Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Obj(), + ) managerClient := manageBuilder.Build() - acr := newACController(managerClient, nil, TestNamespace) - rc := newRemoteController(ctx, managerClient, nil) + cRec := newClustersReconciler(managerClient, TestNamespace) worker1Builder, _ := getClientBuilder() worker1Builder = worker1Builder.WithLists(&kueue.WorkloadList{Items: tc.worker1Workloads}, &batchv1.JobList{Items: tc.worker1Jobs}) @@ -304,14 +308,10 @@ func TestWlReconcile(t *testing.T) { w1remoteClient := newRemoteClient(managerClient, nil) w1remoteClient.client = worker1Client + cRec.clients["worker1"] = w1remoteClient - rc.remoteClients = map[string]*remoteClient{"worker1": w1remoteClient} - - acr.controllers["ac1"] = rc - - reconciler := wlReconciler{ - acr: acr, - } + helper, _ := newMultiKueueStoreHelper(managerClient) + reconciler := newWlReconciler(managerClient, helper, cRec) _, gotErr := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: tc.reconcileFor, Namespace: TestNamespace}}) if diff := cmp.Diff(tc.wantError, gotErr, cmpopts.EquateErrors()); diff != "" { diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go index 1079934cf4..8bb22faeef 100644 --- a/pkg/util/testing/wrappers.go +++ b/pkg/util/testing/wrappers.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" + kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1" kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" ) @@ -746,3 +747,64 @@ func (p *WorkloadPriorityClassWrapper) PriorityValue(v int32) *WorkloadPriorityC func (p *WorkloadPriorityClassWrapper) Obj() *kueue.WorkloadPriorityClass { return &p.WorkloadPriorityClass } + +type MultiKueueConfigWrapper struct { + kueuealpha.MultiKueueConfig +} + +func MakeMultiKueueConfig(name string) *MultiKueueConfigWrapper { + return &MultiKueueConfigWrapper{ + MultiKueueConfig: kueuealpha.MultiKueueConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, + } +} + +func (mkc *MultiKueueConfigWrapper) Obj() *kueuealpha.MultiKueueConfig { + return &mkc.MultiKueueConfig +} + +func (mkc *MultiKueueConfigWrapper) Clusters(clusters ...string) *MultiKueueConfigWrapper { + mkc.Spec.Clusters = append(mkc.Spec.Clusters, clusters...) 
+ return mkc +} + +type MultiKueueClusterWrapper struct { + kueuealpha.MultiKueueCluster +} + +func MakeMultiKueueCluster(name string) *MultiKueueClusterWrapper { + return &MultiKueueClusterWrapper{ + MultiKueueCluster: kueuealpha.MultiKueueCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, + } +} + +func (mkc *MultiKueueClusterWrapper) Obj() *kueuealpha.MultiKueueCluster { + return &mkc.MultiKueueCluster +} + +func (mkc *MultiKueueClusterWrapper) Secret(name, secretName string) *MultiKueueClusterWrapper { + mkc.Spec.KubeconfigRef = kueuealpha.KubeconfigRef{ + Name: name, + Location: secretName, + LocationType: kueuealpha.SecretLocationType, + } + return mkc +} + +func (mkc *MultiKueueClusterWrapper) Active(state metav1.ConditionStatus, reason, message string) *MultiKueueClusterWrapper { + cond := metav1.Condition{ + Type: kueuealpha.MultiKueueClusterActive, + Status: state, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(&mkc.Status.Conditions, cond) + return mkc +} diff --git a/test/e2e/multikueue/e2e_test.go b/test/e2e/multikueue/e2e_test.go index 38c3282bcc..71adfe8bc3 100644 --- a/test/e2e/multikueue/e2e_test.go +++ b/test/e2e/multikueue/e2e_test.go @@ -49,11 +49,13 @@ var _ = ginkgo.Describe("MultiKueue", func() { worker1Ns *corev1.Namespace worker2Ns *corev1.Namespace - multiKueueConfig *kueuealpha.MultiKueueConfig - multiKueueAc *kueue.AdmissionCheck - managerFlavor *kueue.ResourceFlavor - managerCq *kueue.ClusterQueue - managerLq *kueue.LocalQueue + multiKueueCluster1 *kueuealpha.MultiKueueCluster + multiKueueCluster2 *kueuealpha.MultiKueueCluster + multiKueueConfig *kueuealpha.MultiKueueConfig + multiKueueAc *kueue.AdmissionCheck + managerFlavor *kueue.ResourceFlavor + managerCq *kueue.ClusterQueue + managerLq *kueue.LocalQueue worker1Flavor *kueue.ResourceFlavor worker1Cq *kueue.ClusterQueue @@ -86,29 +88,13 @@ var _ = ginkgo.Describe("MultiKueue", func() { } gomega.Expect(k8sWorker2Client.Create(ctx, worker2Ns)).To(gomega.Succeed()) - multiKueueConfig = &kueuealpha.MultiKueueConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "multikueueconfig", - }, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - Name: "worker1", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "multikueue1", - LocationType: kueuealpha.SecretLocationType, - }, - }, - { - Name: "worker2", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "multikueue2", - LocationType: kueuealpha.SecretLocationType, - }, - }, - }, - }, - } + multiKueueCluster1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", "multikueue1").Obj() + gomega.Expect(k8sManagerClient.Create(ctx, multiKueueCluster1)).To(gomega.Succeed()) + + multiKueueCluster2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", "multikueue2").Obj() + gomega.Expect(k8sManagerClient.Create(ctx, multiKueueCluster2)).To(gomega.Succeed()) + + multiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters("worker1", "worker2").Obj() gomega.Expect(k8sManagerClient.Create(ctx, multiKueueConfig)).Should(gomega.Succeed()) multiKueueAc = utiltesting.MakeAdmissionCheck("ac1"). 
@@ -191,6 +177,8 @@ var _ = ginkgo.Describe("MultiKueue", func() { util.ExpectResourceFlavorToBeDeleted(ctx, k8sManagerClient, managerFlavor, true) util.ExpectAdmissionCheckToBeDeleted(ctx, k8sManagerClient, multiKueueAc, true) gomega.Expect(k8sManagerClient.Delete(ctx, multiKueueConfig)).To(gomega.Succeed()) + gomega.Expect(k8sManagerClient.Delete(ctx, multiKueueCluster1)).To(gomega.Succeed()) + gomega.Expect(k8sManagerClient.Delete(ctx, multiKueueCluster2)).To(gomega.Succeed()) }) ginkgo.When("Creating a multikueue admission check", func() { diff --git a/test/integration/multikueue/multikueue_test.go b/test/integration/multikueue/multikueue_test.go index 4ab2021b39..38e5e61404 100644 --- a/test/integration/multikueue/multikueue_test.go +++ b/test/integration/multikueue/multikueue_test.go @@ -48,6 +48,8 @@ var _ = ginkgo.Describe("Multikueue", func() { managerMultikueueSecret1 *corev1.Secret managerMultikueueSecret2 *corev1.Secret + managerClusterWorker1 *kueuealpha.MultiKueueCluster + managerClusterWorker2 *kueuealpha.MultiKueueCluster managerMultiKueueConfig *kueuealpha.MultiKueueConfig multikueueAC *kueue.AdmissionCheck managerCq *kueue.ClusterQueue @@ -109,29 +111,13 @@ var _ = ginkgo.Describe("Multikueue", func() { } gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) - managerMultiKueueConfig = &kueuealpha.MultiKueueConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "multikueueconfig", - }, - Spec: kueuealpha.MultiKueueConfigSpec{ - Clusters: []kueuealpha.MultiKueueCluster{ - { - Name: "worker1", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "multikueue1", - LocationType: kueuealpha.SecretLocationType, - }, - }, - { - Name: "worker2", - KubeconfigRef: kueuealpha.KubeconfigRef{ - Location: "multikueue2", - LocationType: kueuealpha.SecretLocationType, - }, - }, - }, - }, - } + managerClusterWorker1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", managerMultikueueSecret1.Name).Obj() + gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerClusterWorker1)).To(gomega.Succeed()) + + managerClusterWorker2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", managerMultikueueSecret2.Name).Obj() + gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerClusterWorker2)).To(gomega.Succeed()) + + managerMultiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters(managerClusterWorker1.Name, managerClusterWorker2.Name).Obj() gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerMultiKueueConfig)).Should(gomega.Succeed()) multikueueAC = utiltesting.MakeAdmissionCheck("ac1"). 
@@ -179,6 +165,8 @@ var _ = ginkgo.Describe("Multikueue", func() { util.ExpectClusterQueueToBeDeleted(worker2Cluster.ctx, worker2Cluster.client, worker2Cq, true) util.ExpectAdmissionCheckToBeDeleted(managerCluster.ctx, managerCluster.client, multikueueAC, true) gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerMultiKueueConfig)).To(gomega.Succeed()) + gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerClusterWorker1)).To(gomega.Succeed()) + gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerClusterWorker2)).To(gomega.Succeed()) gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerMultikueueSecret1)).To(gomega.Succeed()) gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) }) From d298b376e74f483acfb777794db5572d81c7ae4b Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Tue, 23 Jan 2024 15:24:33 +0200 Subject: [PATCH 3/9] Fix after rebase --- .../multikueue/jobset_adapter_test.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go b/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go index 507e219078..90811b29cf 100644 --- a/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go +++ b/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go @@ -223,12 +223,16 @@ func TestWlReconcileJobset(t *testing.T) { manageBuilder = manageBuilder.WithLists(&kueue.WorkloadList{Items: tc.managersWorkloads}, &jobset.JobSetList{Items: tc.managersJobSets}) manageBuilder = manageBuilder.WithStatusSubresource(slices.Map(tc.managersWorkloads, func(w *kueue.Workload) client.Object { return w })...) manageBuilder = manageBuilder.WithStatusSubresource(slices.Map(tc.managersJobSets, func(w *jobset.JobSet) client.Object { return w })...) - manageBuilder = manageBuilder.WithObjects(utiltesting.MakeAdmissionCheck("ac1").ControllerName(ControllerName).Obj()) + manageBuilder = manageBuilder.WithObjects( + utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1").Obj(), + utiltesting.MakeAdmissionCheck("ac1").ControllerName(ControllerName). + Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). 
+ Obj(), + ) managerClient := manageBuilder.Build() - acr := newACController(managerClient, nil, TestNamespace) - rc := newRemoteController(ctx, managerClient, nil) + cRec := newClustersReconciler(managerClient, TestNamespace) worker1Builder, _ := getClientBuilder() worker1Builder = worker1Builder.WithLists(&kueue.WorkloadList{Items: tc.worker1Workloads}, &jobset.JobSetList{Items: tc.worker1JobSets}) @@ -237,13 +241,10 @@ func TestWlReconcileJobset(t *testing.T) { w1remoteClient := newRemoteClient(managerClient, nil) w1remoteClient.client = worker1Client - rc.remoteClients = map[string]*remoteClient{"worker1": w1remoteClient} - - acr.controllers["ac1"] = rc + cRec.clients["worker1"] = w1remoteClient - reconciler := wlReconciler{ - acr: acr, - } + helper, _ := newMultiKueueStoreHelper(managerClient) + reconciler := newWlReconciler(managerClient, helper, cRec) _, gotErr := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "wl1", Namespace: TestNamespace}}) if gotErr != nil { From 926666ffc25052902d0648248f53286c7dd07eb9 Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Tue, 23 Jan 2024 17:16:52 +0200 Subject: [PATCH 4/9] Review Remarks --- apis/kueue/v1alpha1/multikueueconfig_types.go | 31 ++++---- apis/kueue/v1alpha1/zz_generated.deepcopy.go | 10 +-- .../kueue.x-k8s.io_multikueueclusters.yaml | 1 + .../{kubeconfigref.go => kubeconfig.go} | 16 ++-- .../kueue/v1alpha1/multikueueclusterspec.go | 10 +-- client-go/applyconfiguration/utils.go | 4 +- .../kueue.x-k8s.io_multikueueclusters.yaml | 1 + .../multikueue/admissioncheck.go | 28 +++---- .../admissionchecks/multikueue/indexer.go | 2 +- .../multikueue/indexer_test.go | 77 +++++++++++++++---- .../multikueue/multikueuecluster.go | 16 ++-- pkg/util/testing/wrappers.go | 2 +- 12 files changed, 125 insertions(+), 73 deletions(-) rename client-go/applyconfiguration/kueue/v1alpha1/{kubeconfigref.go => kubeconfig.go} (72%) diff --git a/apis/kueue/v1alpha1/multikueueconfig_types.go b/apis/kueue/v1alpha1/multikueueconfig_types.go index 1fa40fb5d5..db43b5390a 100644 --- a/apis/kueue/v1alpha1/multikueueconfig_types.go +++ b/apis/kueue/v1alpha1/multikueueconfig_types.go @@ -36,7 +36,7 @@ const ( SecretLocationType LocationType = "Secret" ) -type KubeconfigRef struct { +type KubeConfig struct { // Name of the cluster inside the given KubeConfig. Name string `json:"name"` @@ -51,19 +51,20 @@ type KubeconfigRef struct { } type MultiKueueClusterSpec struct { - KubeconfigRef KubeconfigRef `json:"kubeconfigRef"` + // Information how to connect to the cluster. 
+ KubeConfig KubeConfig `json:"kubeconfigRef"` } type MultiKueueClusterStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } -//+genclient -//+genclient:nonNamespaced -//+kubebuilder:object:root=true -//+kubebuilder:storageversion -//+kubebuilder:subresource:status -//+kubebuilder:resource:scope=Cluster +// +genclient +// +genclient:nonNamespaced +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster // MultiKueueCluster is the Schema for the multikueue API type MultiKueueCluster struct { @@ -74,7 +75,7 @@ type MultiKueueCluster struct { Status MultiKueueClusterStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // MultiKueueClusterList contains a list of MultiKueueCluster type MultiKueueClusterList struct { @@ -93,11 +94,11 @@ type MultiKueueConfigSpec struct { Clusters []string `json:"clusters"` } -//+genclient -//+genclient:nonNamespaced -//+kubebuilder:object:root=true -//+kubebuilder:storageversion -//+kubebuilder:resource:scope=Cluster +// +genclient +// +genclient:nonNamespaced +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:scope=Cluster // MultiKueueConfig is the Schema for the multikueue API type MultiKueueConfig struct { @@ -107,7 +108,7 @@ type MultiKueueConfig struct { Spec MultiKueueConfigSpec `json:"spec,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // MultiKueueConfigList contains a list of MultiKueueConfig type MultiKueueConfigList struct { diff --git a/apis/kueue/v1alpha1/zz_generated.deepcopy.go b/apis/kueue/v1alpha1/zz_generated.deepcopy.go index cfd70d1936..8a32b2bf9f 100644 --- a/apis/kueue/v1alpha1/zz_generated.deepcopy.go +++ b/apis/kueue/v1alpha1/zz_generated.deepcopy.go @@ -27,16 +27,16 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeconfigRef) DeepCopyInto(out *KubeconfigRef) { +func (in *KubeConfig) DeepCopyInto(out *KubeConfig) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigRef. -func (in *KubeconfigRef) DeepCopy() *KubeconfigRef { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeConfig. +func (in *KubeConfig) DeepCopy() *KubeConfig { if in == nil { return nil } - out := new(KubeconfigRef) + out := new(KubeConfig) in.DeepCopyInto(out) return out } @@ -103,7 +103,7 @@ func (in *MultiKueueClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MultiKueueClusterSpec) DeepCopyInto(out *MultiKueueClusterSpec) { *out = *in - out.KubeconfigRef = in.KubeconfigRef + out.KubeConfig = in.KubeConfig } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiKueueClusterSpec. diff --git a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml index 46af7d997c..696bdd72b3 100644 --- a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml +++ b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml @@ -47,6 +47,7 @@ spec: spec: properties: kubeconfigRef: + description: Information how to connect to the cluster. 
properties: location: description: Location of the KubeConfig. diff --git a/client-go/applyconfiguration/kueue/v1alpha1/kubeconfigref.go b/client-go/applyconfiguration/kueue/v1alpha1/kubeconfig.go similarity index 72% rename from client-go/applyconfiguration/kueue/v1alpha1/kubeconfigref.go rename to client-go/applyconfiguration/kueue/v1alpha1/kubeconfig.go index aaea3ff62f..aa492329e5 100644 --- a/client-go/applyconfiguration/kueue/v1alpha1/kubeconfigref.go +++ b/client-go/applyconfiguration/kueue/v1alpha1/kubeconfig.go @@ -21,24 +21,24 @@ import ( v1alpha1 "sigs.k8s.io/kueue/apis/kueue/v1alpha1" ) -// KubeconfigRefApplyConfiguration represents an declarative configuration of the KubeconfigRef type for use +// KubeConfigApplyConfiguration represents an declarative configuration of the KubeConfig type for use // with apply. -type KubeconfigRefApplyConfiguration struct { +type KubeConfigApplyConfiguration struct { Name *string `json:"name,omitempty"` Location *string `json:"location,omitempty"` LocationType *v1alpha1.LocationType `json:"locationType,omitempty"` } -// KubeconfigRefApplyConfiguration constructs an declarative configuration of the KubeconfigRef type for use with +// KubeConfigApplyConfiguration constructs an declarative configuration of the KubeConfig type for use with // apply. -func KubeconfigRef() *KubeconfigRefApplyConfiguration { - return &KubeconfigRefApplyConfiguration{} +func KubeConfig() *KubeConfigApplyConfiguration { + return &KubeConfigApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *KubeconfigRefApplyConfiguration) WithName(value string) *KubeconfigRefApplyConfiguration { +func (b *KubeConfigApplyConfiguration) WithName(value string) *KubeConfigApplyConfiguration { b.Name = &value return b } @@ -46,7 +46,7 @@ func (b *KubeconfigRefApplyConfiguration) WithName(value string) *KubeconfigRefA // WithLocation sets the Location field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Location field is set to the value of the last call. -func (b *KubeconfigRefApplyConfiguration) WithLocation(value string) *KubeconfigRefApplyConfiguration { +func (b *KubeConfigApplyConfiguration) WithLocation(value string) *KubeConfigApplyConfiguration { b.Location = &value return b } @@ -54,7 +54,7 @@ func (b *KubeconfigRefApplyConfiguration) WithLocation(value string) *Kubeconfig // WithLocationType sets the LocationType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LocationType field is set to the value of the last call. 
-func (b *KubeconfigRefApplyConfiguration) WithLocationType(value v1alpha1.LocationType) *KubeconfigRefApplyConfiguration { +func (b *KubeConfigApplyConfiguration) WithLocationType(value v1alpha1.LocationType) *KubeConfigApplyConfiguration { b.LocationType = &value return b } diff --git a/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go index eb64f01800..af40891fc2 100644 --- a/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go +++ b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go @@ -20,7 +20,7 @@ package v1alpha1 // MultiKueueClusterSpecApplyConfiguration represents an declarative configuration of the MultiKueueClusterSpec type for use // with apply. type MultiKueueClusterSpecApplyConfiguration struct { - KubeconfigRef *KubeconfigRefApplyConfiguration `json:"kubeconfigRef,omitempty"` + KubeConfig *KubeConfigApplyConfiguration `json:"kubeconfigRef,omitempty"` } // MultiKueueClusterSpecApplyConfiguration constructs an declarative configuration of the MultiKueueClusterSpec type for use with @@ -29,10 +29,10 @@ func MultiKueueClusterSpec() *MultiKueueClusterSpecApplyConfiguration { return &MultiKueueClusterSpecApplyConfiguration{} } -// WithKubeconfigRef sets the KubeconfigRef field in the declarative configuration to the given value +// WithKubeConfig sets the KubeConfig field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeconfigRef field is set to the value of the last call. -func (b *MultiKueueClusterSpecApplyConfiguration) WithKubeconfigRef(value *KubeconfigRefApplyConfiguration) *MultiKueueClusterSpecApplyConfiguration { - b.KubeconfigRef = value +// If called multiple times, the KubeConfig field is set to the value of the last call. +func (b *MultiKueueClusterSpecApplyConfiguration) WithKubeConfig(value *KubeConfigApplyConfiguration) *MultiKueueClusterSpecApplyConfiguration { + b.KubeConfig = value return b } diff --git a/client-go/applyconfiguration/utils.go b/client-go/applyconfiguration/utils.go index 690ae44cdc..671a65e7c4 100644 --- a/client-go/applyconfiguration/utils.go +++ b/client-go/applyconfiguration/utils.go @@ -32,8 +32,8 @@ import ( func ForKind(kind schema.GroupVersionKind) interface{} { switch kind { // Group=kueue.x-k8s.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigRef"): - return &kueuev1alpha1.KubeconfigRefApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("KubeConfig"): + return &kueuev1alpha1.KubeConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MultiKueueCluster"): return &kueuev1alpha1.MultiKueueClusterApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MultiKueueClusterSpec"): diff --git a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml index 3d15e011af..d934088e28 100644 --- a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml +++ b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml @@ -34,6 +34,7 @@ spec: spec: properties: kubeconfigRef: + description: Information how to connect to the cluster. properties: location: description: Location of the KubeConfig. 
diff --git a/pkg/controller/admissionchecks/multikueue/admissioncheck.go b/pkg/controller/admissionchecks/multikueue/admissioncheck.go index 9fc236b863..cbfe41d1db 100644 --- a/pkg/controller/admissionchecks/multikueue/admissioncheck.go +++ b/pkg/controller/admissionchecks/multikueue/admissioncheck.go @@ -65,12 +65,10 @@ func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re return reconcile.Result{}, client.IgnoreNotFound(err) } - active := true inactiveReason := "" log.V(2).Info("Reconcile AdmissionCheck") if cfg, err := a.helper.ConfigFromRef(ctx, ac.Spec.Parameters); err != nil { - active = false inactiveReason = fmt.Sprintf("Cannot load the AdmissionChecks parameters: %s", err.Error()) } else { var missingClusters []string @@ -80,6 +78,7 @@ func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re cluster := &kueuealpha.MultiKueueCluster{} err := a.client.Get(ctx, types.NamespacedName{Name: clusterName}, cluster) if client.IgnoreNotFound(err) != nil { + log.Error(err, "reading cluster", "multiKueueCluster", clusterName) return reconcile.Result{}, err } @@ -94,23 +93,18 @@ func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re var messageParts []string if len(missingClusters) > 0 { - active = false messageParts = []string{fmt.Sprintf("Missing clusters: %v", missingClusters)} } if len(inactiveClusters) > 0 { - active = false messageParts = append(messageParts, fmt.Sprintf("Inactive clusters: %v", inactiveClusters)) } inactiveReason = strings.Join(messageParts, ", ") } newCondition := metav1.Condition{ - Type: kueue.AdmissionCheckActive, - Status: metav1.ConditionTrue, - Reason: "Active", - Message: "The admission check is active", + Type: kueue.AdmissionCheckActive, } - if active { + if len(inactiveReason) == 0 { newCondition.Status = metav1.ConditionTrue newCondition.Reason = "Active" newCondition.Message = "The admission check is active" @@ -167,7 +161,7 @@ func (m *mkConfigHandler) Create(ctx context.Context, event event.CreateEvent, q } if err := queueReconcileForConfigUsers(ctx, mkc.Name, m.client, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on create event", "multiKueueConfig", klog.KObj(mkc)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on create event", "multiKueueConfig", klog.KObj(mkc)) } } @@ -179,7 +173,7 @@ func (m *mkConfigHandler) Update(ctx context.Context, event event.UpdateEvent, q } if err := queueReconcileForConfigUsers(ctx, oldMKC.Name, m.client, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on update event", "multiKueueConfig", klog.KObj(oldMKC)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on update event", "multiKueueConfig", klog.KObj(oldMKC)) } } @@ -190,7 +184,7 @@ func (m *mkConfigHandler) Delete(ctx context.Context, event event.DeleteEvent, q } if err := queueReconcileForConfigUsers(ctx, mkc.Name, m.client, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on delete event", "multiKueueConfig", klog.KObj(mkc)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on delete event", "multiKueueConfig", klog.KObj(mkc)) } } @@ -201,7 +195,7 @@ func (m *mkConfigHandler) Generic(ctx context.Context, event event.GenericEvent, } if err := queueReconcileForConfigUsers(ctx, mkc.Name, m.client, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on generic event", "multiKueueConfig", klog.KObj(mkc)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on generic event", "multiKueueConfig", klog.KObj(mkc)) } } @@ -237,7 +231,7 @@ func 
(m *mkClusterHandler) Create(ctx context.Context, event event.CreateEvent, } if err := queueReconcileForConfigUsers(ctx, mkc.Name, m.client, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on create event", "multiKueueConfig", klog.KObj(mkc)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on create event", "multiKueueConfig", klog.KObj(mkc)) } } @@ -252,7 +246,7 @@ func (m *mkClusterHandler) Update(ctx context.Context, event event.UpdateEvent, newActive := apimeta.IsStatusConditionTrue(newMKC.Status.Conditions, kueuealpha.MultiKueueClusterActive) if oldActive != newActive { if err := m.queue(ctx, newMKC, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on update event", "multiKueueCluster", klog.KObj(oldMKC)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on update event", "multiKueueCluster", klog.KObj(oldMKC)) } } } @@ -264,7 +258,7 @@ func (m *mkClusterHandler) Delete(ctx context.Context, event event.DeleteEvent, } if err := m.queue(ctx, mkc, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on delete event", "multiKueueCluster", klog.KObj(mkc)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on delete event", "multiKueueCluster", klog.KObj(mkc)) } } @@ -275,7 +269,7 @@ func (m *mkClusterHandler) Generic(ctx context.Context, event event.GenericEvent } if err := m.queue(ctx, mkc, q); err != nil { - ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on generic event", "multiKueueCluster", klog.KObj(mkc)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on generic event", "multiKueueCluster", klog.KObj(mkc)) } } diff --git a/pkg/controller/admissionchecks/multikueue/indexer.go b/pkg/controller/admissionchecks/multikueue/indexer.go index 8cc43bd02a..3ff9d2fb63 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer.go +++ b/pkg/controller/admissionchecks/multikueue/indexer.go @@ -44,7 +44,7 @@ func getIndexUsingKubeConfigs(configNamespace string) func(obj client.Object) [] if !isCluster { return nil } - return []string{strings.Join([]string{configNamespace, cluster.Spec.KubeconfigRef.Location}, "/")} + return []string{strings.Join([]string{configNamespace, cluster.Spec.KubeConfig.Location}, "/")} } } diff --git a/pkg/controller/admissionchecks/multikueue/indexer_test.go b/pkg/controller/admissionchecks/multikueue/indexer_test.go index 99122b23e6..bc9bd8c52a 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer_test.go +++ b/pkg/controller/admissionchecks/multikueue/indexer_test.go @@ -65,39 +65,31 @@ func getClientBuilder() (*fake.ClientBuilder, context.Context) { return builder, ctx } -func TestMultikueConfigUsingKubeconfig(t *testing.T) { +func TestListMultikueClustersUsingKubeconfig(t *testing.T) { cases := map[string]struct { - configs []*kueuealpha.MultiKueueCluster + clusters []*kueuealpha.MultiKueueCluster filter client.ListOption wantListError error wantList []string }{ "no clusters": { - configs: []*kueuealpha.MultiKueueCluster{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "name", - }, - }, - }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, }, "single cluster, single match": { - configs: []*kueuealpha.MultiKueueCluster{ + clusters: []*kueuealpha.MultiKueueCluster{ utiltesting.MakeMultiKueueCluster("cluster1").Secret("", "secret1").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, wantList: []string{"cluster1"}, }, "single cluster, no match": { - configs: []*kueuealpha.MultiKueueCluster{ + clusters: []*kueuealpha.MultiKueueCluster{ 
utiltesting.MakeMultiKueueCluster("cluster2").Secret("", "secret2").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, }, "multiple clusters, single match": { - configs: []*kueuealpha.MultiKueueCluster{ + clusters: []*kueuealpha.MultiKueueCluster{ utiltesting.MakeMultiKueueCluster("cluster1").Secret("", "secret1").Obj(), utiltesting.MakeMultiKueueCluster("cluster2").Secret("", "secret2").Obj(), }, @@ -109,7 +101,7 @@ func TestMultikueConfigUsingKubeconfig(t *testing.T) { t.Run(name, func(t *testing.T) { builder, ctx := getClientBuilder() k8sclient := builder.Build() - for _, req := range tc.configs { + for _, req := range tc.clusters { if err := k8sclient.Create(ctx, req); err != nil { t.Errorf("Unable to create %s request: %v", client.ObjectKeyFromObject(req), err) } @@ -129,3 +121,60 @@ func TestMultikueConfigUsingKubeconfig(t *testing.T) { }) } } + +func TestListMultikueConfigsUsingMultikueueClusters(t *testing.T) { + cases := map[string]struct { + configs []*kueuealpha.MultiKueueConfig + filter client.ListOption + wantListError error + wantList []string + }{ + "no configs": { + filter: client.MatchingFields{UsingMultiKueueClusters: "cluster1"}, + }, + "single config, single match": { + configs: []*kueuealpha.MultiKueueConfig{ + utiltesting.MakeMultiKueueConfig("config1").Clusters("cluster1", "cluster2").Obj(), + }, + filter: client.MatchingFields{UsingMultiKueueClusters: "cluster2"}, + wantList: []string{"config1"}, + }, + "single config, no match": { + configs: []*kueuealpha.MultiKueueConfig{ + utiltesting.MakeMultiKueueConfig("config2").Clusters("cluster2").Obj(), + }, + filter: client.MatchingFields{UsingMultiKueueClusters: "cluster1"}, + }, + "multiple configs, single match": { + configs: []*kueuealpha.MultiKueueConfig{ + utiltesting.MakeMultiKueueConfig("config1").Clusters("cluster1", "cluster2").Obj(), + utiltesting.MakeMultiKueueConfig("config2").Clusters("cluster2").Obj(), + }, + filter: client.MatchingFields{UsingMultiKueueClusters: "cluster1"}, + wantList: []string{"config1"}, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + builder, ctx := getClientBuilder() + k8sclient := builder.Build() + for _, config := range tc.configs { + if err := k8sclient.Create(ctx, config); err != nil { + t.Errorf("Unable to create %s config: %v", client.ObjectKeyFromObject(config), err) + } + } + + lst := &kueuealpha.MultiKueueConfigList{} + + gotListErr := k8sclient.List(ctx, lst, tc.filter) + if diff := cmp.Diff(tc.wantListError, gotListErr); diff != "" { + t.Errorf("unexpected list error (-want/+got):\n%s", diff) + } + + gotList := slices.Map(lst.Items, func(mkc *kueuealpha.MultiKueueConfig) string { return mkc.Name }) + if diff := cmp.Diff(tc.wantList, gotList, cmpopts.EquateEmpty(), cmpopts.SortSlices(func(a, b string) bool { return a < b })); diff != "" { + t.Errorf("unexpected list (-want/+got):\n%s", diff) + } + }) + } +} diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go index 2116210520..23e25d630c 100644 --- a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go @@ -121,6 +121,9 @@ func (rc *remoteClient) queueWorkloadEvent(ctx context.Context, ev watch.Event) localWl := &kueue.Workload{} if err := rc.localClient.Get(ctx, client.ObjectKeyFromObject(wl), localWl); err == nil { + if !apierrors.IsNotFound(err) { + ctrl.LoggerFrom(ctx).Error(err, "reading 
local workload") + } rc.wlUpdateCh <- event.GenericEvent{Object: localWl} } } @@ -164,7 +167,7 @@ func (c *clustersReconciler) stopAndRemoveCluster(clusterName string) { } } -func (c *clustersReconciler) setRemoteClientConfig(clusterName string, kubeconfig []byte) error { +func (c *clustersReconciler) setRemoteClientConfig(ctx context.Context, clusterName string, kubeconfig []byte) error { c.lock.Lock() defer c.lock.Unlock() @@ -178,6 +181,7 @@ func (c *clustersReconciler) setRemoteClientConfig(clusterName string, kubeconfi } if err := client.setConfig(c.rootContext, kubeconfig); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "reading local workload") delete(c.clients, clusterName) return err } @@ -194,6 +198,7 @@ func (a *clustersReconciler) controllerFor(acName string) (*remoteClient, bool) func (c *clustersReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { cluster := &kueuealpha.MultiKueueCluster{} + log := ctrl.LoggerFrom(ctx) err := c.client.Get(ctx, req.NamespacedName, cluster) if client.IgnoreNotFound(err) != nil { @@ -202,26 +207,27 @@ func (c *clustersReconciler) Reconcile(ctx context.Context, req reconcile.Reques if err != nil || !cluster.DeletionTimestamp.IsZero() { c.stopAndRemoveCluster(req.Name) - // notify ac reconcilere return reconcile.Result{}, nil } // get the kubeconfig - kubeConfig, retry, err := c.getKubeConfig(ctx, &cluster.Spec.KubeconfigRef) + kubeConfig, retry, err := c.getKubeConfig(ctx, &cluster.Spec.KubeConfig) if retry { return reconcile.Result{}, nil } if err != nil { + log.Error(err, "reading kubeconfig") return reconcile.Result{}, c.updateStatus(ctx, cluster, false, "BadConfig", err.Error()) } - if err := c.setRemoteClientConfig(cluster.Name, kubeConfig); err != nil { + if err := c.setRemoteClientConfig(ctx, cluster.Name, kubeConfig); err != nil { + log.Error(err, "setting kubeconfig") return reconcile.Result{}, c.updateStatus(ctx, cluster, false, "ClientConnectionFailed", err.Error()) } return reconcile.Result{}, c.updateStatus(ctx, cluster, true, "Active", "Connected") } -func (c *clustersReconciler) getKubeConfig(ctx context.Context, ref *kueuealpha.KubeconfigRef) ([]byte, bool, error) { +func (c *clustersReconciler) getKubeConfig(ctx context.Context, ref *kueuealpha.KubeConfig) ([]byte, bool, error) { sec := corev1.Secret{} secretObjKey := types.NamespacedName{ Namespace: c.configNamespace, diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go index 8bb22faeef..20c11fc81b 100644 --- a/pkg/util/testing/wrappers.go +++ b/pkg/util/testing/wrappers.go @@ -790,7 +790,7 @@ func (mkc *MultiKueueClusterWrapper) Obj() *kueuealpha.MultiKueueCluster { } func (mkc *MultiKueueClusterWrapper) Secret(name, secretName string) *MultiKueueClusterWrapper { - mkc.Spec.KubeconfigRef = kueuealpha.KubeconfigRef{ + mkc.Spec.KubeConfig = kueuealpha.KubeConfig{ Name: name, Location: secretName, LocationType: kueuealpha.SecretLocationType, From 0757d6505e9e8ee3a63522ad8b9113bf048af0dd Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Thu, 25 Jan 2024 08:59:32 +0200 Subject: [PATCH 5/9] Review Remarks --- apis/kueue/v1alpha1/multikueueconfig_types.go | 7 +- .../kueue.x-k8s.io_multikueueclusters.yaml | 7 +- .../kueue/v1alpha1/multikueueclusterspec.go | 2 +- .../kueue.x-k8s.io_multikueueclusters.yaml | 7 +- .../multikueue/admissioncheck_test.go | 29 ++++ .../multikueue/indexer_test.go | 4 +- .../multikueue/multikueuecluster.go | 2 +- test/e2e/multikueue/e2e_test.go | 26 ++-- .../integration/multikueue/multikueue_test.go 
| 126 +++++++++--------- test/integration/multikueue/suite_test.go | 18 +-- 10 files changed, 134 insertions(+), 94 deletions(-) diff --git a/apis/kueue/v1alpha1/multikueueconfig_types.go b/apis/kueue/v1alpha1/multikueueconfig_types.go index db43b5390a..4d554c4f7a 100644 --- a/apis/kueue/v1alpha1/multikueueconfig_types.go +++ b/apis/kueue/v1alpha1/multikueueconfig_types.go @@ -52,10 +52,15 @@ type KubeConfig struct { type MultiKueueClusterSpec struct { // Information how to connect to the cluster. - KubeConfig KubeConfig `json:"kubeconfigRef"` + KubeConfig KubeConfig `json:"kubeConfig"` } type MultiKueueClusterStatus struct { + // +optional + // +listType=map + // +listMapKey=type + // +patchStrategy=merge + // +patchMergeKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } diff --git a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml index 696bdd72b3..1d0ae133c5 100644 --- a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml +++ b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml @@ -46,7 +46,7 @@ spec: type: object spec: properties: - kubeconfigRef: + kubeConfig: description: Information how to connect to the cluster. properties: location: @@ -67,7 +67,7 @@ spec: - name type: object required: - - kubeconfigRef + - kubeConfig type: object status: properties: @@ -139,6 +139,9 @@ spec: - type type: object type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map type: object type: object served: true diff --git a/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go index af40891fc2..e7e304f7ea 100644 --- a/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go +++ b/client-go/applyconfiguration/kueue/v1alpha1/multikueueclusterspec.go @@ -20,7 +20,7 @@ package v1alpha1 // MultiKueueClusterSpecApplyConfiguration represents an declarative configuration of the MultiKueueClusterSpec type for use // with apply. type MultiKueueClusterSpecApplyConfiguration struct { - KubeConfig *KubeConfigApplyConfiguration `json:"kubeconfigRef,omitempty"` + KubeConfig *KubeConfigApplyConfiguration `json:"kubeConfig,omitempty"` } // MultiKueueClusterSpecApplyConfiguration constructs an declarative configuration of the MultiKueueClusterSpec type for use with diff --git a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml index d934088e28..bf911a066c 100644 --- a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml +++ b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml @@ -33,7 +33,7 @@ spec: type: object spec: properties: - kubeconfigRef: + kubeConfig: description: Information how to connect to the cluster. 
properties: location: @@ -54,7 +54,7 @@ spec: - name type: object required: - - kubeconfigRef + - kubeConfig type: object status: properties: @@ -126,6 +126,9 @@ spec: - type type: object type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map type: object type: object served: true diff --git a/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go b/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go index fe33b5b65e..564ceb3d0d 100644 --- a/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go +++ b/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go @@ -129,6 +129,35 @@ func TestReconcile(t *testing.T) { Obj(), }, }, + "missing and inactive cluster": { + reconcileFor: "ac1", + checks: []kueue.AdmissionCheck{ + *utiltesting.MakeAdmissionCheck("ac1"). + ControllerName(ControllerName). + Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Obj(), + }, + configs: []kueuealpha.MultiKueueConfig{ + *utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1", "worker2", "worker3").Obj(), + }, + + clusters: []kueuealpha.MultiKueueCluster{ + *utiltesting.MakeMultiKueueCluster("worker1").Active(metav1.ConditionFalse, "ByTest", "by test").Obj(), + *utiltesting.MakeMultiKueueCluster("worker2").Active(metav1.ConditionTrue, "ByTest", "by test").Obj(), + }, + wantChecks: []kueue.AdmissionCheck{ + *utiltesting.MakeAdmissionCheck("ac1"). + ControllerName(ControllerName). + Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Condition(metav1.Condition{ + Type: kueue.AdmissionCheckActive, + Status: metav1.ConditionFalse, + Reason: "Inactive", + Message: "Missing clusters: [worker3], Inactive clusters: [worker1]", + }). + Obj(), + }, + }, "active": { reconcileFor: "ac1", checks: []kueue.AdmissionCheck{ diff --git a/pkg/controller/admissionchecks/multikueue/indexer_test.go b/pkg/controller/admissionchecks/multikueue/indexer_test.go index bc9bd8c52a..14f556b9e1 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer_test.go +++ b/pkg/controller/admissionchecks/multikueue/indexer_test.go @@ -103,7 +103,7 @@ func TestListMultikueClustersUsingKubeconfig(t *testing.T) { k8sclient := builder.Build() for _, req := range tc.clusters { if err := k8sclient.Create(ctx, req); err != nil { - t.Errorf("Unable to create %s request: %v", client.ObjectKeyFromObject(req), err) + t.Fatalf("Unable to create %s cluster: %v", client.ObjectKeyFromObject(req), err) } } @@ -160,7 +160,7 @@ func TestListMultikueConfigsUsingMultikueueClusters(t *testing.T) { k8sclient := builder.Build() for _, config := range tc.configs { if err := k8sclient.Create(ctx, config); err != nil { - t.Errorf("Unable to create %s config: %v", client.ObjectKeyFromObject(config), err) + t.Fatalf("Unable to create %s config: %v", client.ObjectKeyFromObject(config), err) } } diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go index 23e25d630c..29e74154bb 100644 --- a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go @@ -213,7 +213,7 @@ func (c *clustersReconciler) Reconcile(ctx context.Context, req reconcile.Reques // get the kubeconfig kubeConfig, retry, err := c.getKubeConfig(ctx, &cluster.Spec.KubeConfig) if retry { - return reconcile.Result{}, nil + return reconcile.Result{}, err } if err != nil { log.Error(err, "reading kubeconfig") diff --git 
a/test/e2e/multikueue/e2e_test.go b/test/e2e/multikueue/e2e_test.go index 71adfe8bc3..f64716b317 100644 --- a/test/e2e/multikueue/e2e_test.go +++ b/test/e2e/multikueue/e2e_test.go @@ -49,13 +49,13 @@ var _ = ginkgo.Describe("MultiKueue", func() { worker1Ns *corev1.Namespace worker2Ns *corev1.Namespace - multiKueueCluster1 *kueuealpha.MultiKueueCluster - multiKueueCluster2 *kueuealpha.MultiKueueCluster - multiKueueConfig *kueuealpha.MultiKueueConfig - multiKueueAc *kueue.AdmissionCheck - managerFlavor *kueue.ResourceFlavor - managerCq *kueue.ClusterQueue - managerLq *kueue.LocalQueue + workerCluster1 *kueuealpha.MultiKueueCluster + workerCluster2 *kueuealpha.MultiKueueCluster + multiKueueConfig *kueuealpha.MultiKueueConfig + multiKueueAc *kueue.AdmissionCheck + managerFlavor *kueue.ResourceFlavor + managerCq *kueue.ClusterQueue + managerLq *kueue.LocalQueue worker1Flavor *kueue.ResourceFlavor worker1Cq *kueue.ClusterQueue @@ -88,11 +88,11 @@ var _ = ginkgo.Describe("MultiKueue", func() { } gomega.Expect(k8sWorker2Client.Create(ctx, worker2Ns)).To(gomega.Succeed()) - multiKueueCluster1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", "multikueue1").Obj() - gomega.Expect(k8sManagerClient.Create(ctx, multiKueueCluster1)).To(gomega.Succeed()) + workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", "multikueue1").Obj() + gomega.Expect(k8sManagerClient.Create(ctx, workerCluster1)).To(gomega.Succeed()) - multiKueueCluster2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", "multikueue2").Obj() - gomega.Expect(k8sManagerClient.Create(ctx, multiKueueCluster2)).To(gomega.Succeed()) + workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", "multikueue2").Obj() + gomega.Expect(k8sManagerClient.Create(ctx, workerCluster2)).To(gomega.Succeed()) multiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters("worker1", "worker2").Obj() gomega.Expect(k8sManagerClient.Create(ctx, multiKueueConfig)).Should(gomega.Succeed()) @@ -177,8 +177,8 @@ var _ = ginkgo.Describe("MultiKueue", func() { util.ExpectResourceFlavorToBeDeleted(ctx, k8sManagerClient, managerFlavor, true) util.ExpectAdmissionCheckToBeDeleted(ctx, k8sManagerClient, multiKueueAc, true) gomega.Expect(k8sManagerClient.Delete(ctx, multiKueueConfig)).To(gomega.Succeed()) - gomega.Expect(k8sManagerClient.Delete(ctx, multiKueueCluster1)).To(gomega.Succeed()) - gomega.Expect(k8sManagerClient.Delete(ctx, multiKueueCluster2)).To(gomega.Succeed()) + gomega.Expect(k8sManagerClient.Delete(ctx, workerCluster1)).To(gomega.Succeed()) + gomega.Expect(k8sManagerClient.Delete(ctx, workerCluster2)).To(gomega.Succeed()) }) ginkgo.When("Creating a multikueue admission check", func() { diff --git a/test/integration/multikueue/multikueue_test.go b/test/integration/multikueue/multikueue_test.go index 38e5e61404..e4d537d1bf 100644 --- a/test/integration/multikueue/multikueue_test.go +++ b/test/integration/multikueue/multikueue_test.go @@ -48,8 +48,8 @@ var _ = ginkgo.Describe("Multikueue", func() { managerMultikueueSecret1 *corev1.Secret managerMultikueueSecret2 *corev1.Secret - managerClusterWorker1 *kueuealpha.MultiKueueCluster - managerClusterWorker2 *kueuealpha.MultiKueueCluster + workerCluster1 *kueuealpha.MultiKueueCluster + workerCluster2 *kueuealpha.MultiKueueCluster managerMultiKueueConfig *kueuealpha.MultiKueueConfig multikueueAC *kueue.AdmissionCheck managerCq *kueue.ClusterQueue @@ -67,26 +67,26 @@ var _ = ginkgo.Describe("Multikueue", func() { GenerateName: 
"multikueue-", }, } - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerNs)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerNs)).To(gomega.Succeed()) worker1Ns = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: managerNs.Name, }, } - gomega.Expect(worker1Cluster.client.Create(worker1Cluster.ctx, worker1Ns)).To(gomega.Succeed()) + gomega.Expect(worker1TestCluster.client.Create(worker1TestCluster.ctx, worker1Ns)).To(gomega.Succeed()) worker2Ns = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: managerNs.Name, }, } - gomega.Expect(worker2Cluster.client.Create(worker2Cluster.ctx, worker2Ns)).To(gomega.Succeed()) + gomega.Expect(worker2TestCluster.client.Create(worker2TestCluster.ctx, worker2Ns)).To(gomega.Succeed()) - w1Kubeconfig, err := worker1Cluster.kubeConfigBytes() + w1Kubeconfig, err := worker1TestCluster.kubeConfigBytes() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - w2Kubeconfig, err := worker2Cluster.kubeConfigBytes() + w2Kubeconfig, err := worker2TestCluster.kubeConfigBytes() gomega.Expect(err).NotTo(gomega.HaveOccurred()) managerMultikueueSecret1 = &corev1.Secret{ @@ -98,7 +98,7 @@ var _ = ginkgo.Describe("Multikueue", func() { kueuealpha.MultiKueueConfigSecretKey: w1Kubeconfig, }, } - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerMultikueueSecret1)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerMultikueueSecret1)).To(gomega.Succeed()) managerMultikueueSecret2 = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -109,28 +109,28 @@ var _ = ginkgo.Describe("Multikueue", func() { kueuealpha.MultiKueueConfigSecretKey: w2Kubeconfig, }, } - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) - managerClusterWorker1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", managerMultikueueSecret1.Name).Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerClusterWorker1)).To(gomega.Succeed()) + workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", managerMultikueueSecret1.Name).Obj() + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, workerCluster1)).To(gomega.Succeed()) - managerClusterWorker2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", managerMultikueueSecret2.Name).Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerClusterWorker2)).To(gomega.Succeed()) + workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", managerMultikueueSecret2.Name).Obj() + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, workerCluster2)).To(gomega.Succeed()) - managerMultiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters(managerClusterWorker1.Name, managerClusterWorker2.Name).Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerMultiKueueConfig)).Should(gomega.Succeed()) + managerMultiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters(workerCluster1.Name, workerCluster2.Name).Obj() + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerMultiKueueConfig)).Should(gomega.Succeed()) multikueueAC = utiltesting.MakeAdmissionCheck("ac1"). ControllerName(multikueue.ControllerName). 
Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", managerMultiKueueConfig.Name). Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, multikueueAC)).Should(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, multikueueAC)).Should(gomega.Succeed()) ginkgo.By("wait for check active", func() { updatedAc := kueue.AdmissionCheck{} acKey := client.ObjectKeyFromObject(multikueueAC) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(managerCluster.client.Get(managerCluster.ctx, acKey, &updatedAc)).To(gomega.Succeed()) + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, acKey, &updatedAc)).To(gomega.Succeed()) cond := apimeta.FindStatusCondition(updatedAc.Status.Conditions, kueue.AdmissionCheckActive) g.Expect(cond).NotTo(gomega.BeNil()) g.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue), "Reason: %s, Message: %q", cond.Reason, cond.Status) @@ -140,42 +140,42 @@ var _ = ginkgo.Describe("Multikueue", func() { managerCq = utiltesting.MakeClusterQueue("q1"). AdmissionChecks(multikueueAC.Name). Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerCq)).Should(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerCq)).Should(gomega.Succeed()) managerLq = utiltesting.MakeLocalQueue(managerCq.Name, managerNs.Name).ClusterQueue(managerCq.Name).Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, managerLq)).Should(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerLq)).Should(gomega.Succeed()) worker1Cq = utiltesting.MakeClusterQueue("q1").Obj() - gomega.Expect(worker1Cluster.client.Create(worker1Cluster.ctx, worker1Cq)).Should(gomega.Succeed()) + gomega.Expect(worker1TestCluster.client.Create(worker1TestCluster.ctx, worker1Cq)).Should(gomega.Succeed()) worker1Lq = utiltesting.MakeLocalQueue(worker1Cq.Name, worker1Ns.Name).ClusterQueue(worker1Cq.Name).Obj() - gomega.Expect(worker1Cluster.client.Create(worker1Cluster.ctx, worker1Lq)).Should(gomega.Succeed()) + gomega.Expect(worker1TestCluster.client.Create(worker1TestCluster.ctx, worker1Lq)).Should(gomega.Succeed()) worker2Cq = utiltesting.MakeClusterQueue("q1").Obj() - gomega.Expect(worker2Cluster.client.Create(worker2Cluster.ctx, worker2Cq)).Should(gomega.Succeed()) + gomega.Expect(worker2TestCluster.client.Create(worker2TestCluster.ctx, worker2Cq)).Should(gomega.Succeed()) worker2Lq = utiltesting.MakeLocalQueue(worker2Cq.Name, worker2Ns.Name).ClusterQueue(worker2Cq.Name).Obj() - gomega.Expect(worker2Cluster.client.Create(worker2Cluster.ctx, worker2Lq)).Should(gomega.Succeed()) + gomega.Expect(worker2TestCluster.client.Create(worker2TestCluster.ctx, worker2Lq)).Should(gomega.Succeed()) }) ginkgo.AfterEach(func() { - gomega.Expect(util.DeleteNamespace(managerCluster.ctx, managerCluster.client, managerNs)).To(gomega.Succeed()) - gomega.Expect(util.DeleteNamespace(worker1Cluster.ctx, worker1Cluster.client, worker1Ns)).To(gomega.Succeed()) - gomega.Expect(util.DeleteNamespace(worker2Cluster.ctx, worker2Cluster.client, worker2Ns)).To(gomega.Succeed()) - util.ExpectClusterQueueToBeDeleted(managerCluster.ctx, managerCluster.client, managerCq, true) - util.ExpectClusterQueueToBeDeleted(worker1Cluster.ctx, worker1Cluster.client, worker1Cq, true) - util.ExpectClusterQueueToBeDeleted(worker2Cluster.ctx, worker2Cluster.client, worker2Cq, true) - util.ExpectAdmissionCheckToBeDeleted(managerCluster.ctx, managerCluster.client, multikueueAC,
true) - gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerMultiKueueConfig)).To(gomega.Succeed()) - gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerClusterWorker1)).To(gomega.Succeed()) - gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerClusterWorker2)).To(gomega.Succeed()) - gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerMultikueueSecret1)).To(gomega.Succeed()) - gomega.Expect(managerCluster.client.Delete(managerCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) + gomega.Expect(util.DeleteNamespace(managerTestCluster.ctx, managerTestCluster.client, managerNs)).To(gomega.Succeed()) + gomega.Expect(util.DeleteNamespace(worker1TestCluster.ctx, worker1TestCluster.client, worker1Ns)).To(gomega.Succeed()) + gomega.Expect(util.DeleteNamespace(worker2TestCluster.ctx, worker2TestCluster.client, worker2Ns)).To(gomega.Succeed()) + util.ExpectClusterQueueToBeDeleted(managerTestCluster.ctx, managerTestCluster.client, managerCq, true) + util.ExpectClusterQueueToBeDeleted(worker1TestCluster.ctx, worker1TestCluster.client, worker1Cq, true) + util.ExpectClusterQueueToBeDeleted(worker2TestCluster.ctx, worker2TestCluster.client, worker2Cq, true) + util.ExpectAdmissionCheckToBeDeleted(managerTestCluster.ctx, managerTestCluster.client, multikueueAC, true) + gomega.Expect(managerTestCluster.client.Delete(managerTestCluster.ctx, managerMultiKueueConfig)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Delete(managerTestCluster.ctx, workerCluster1)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Delete(managerTestCluster.ctx, workerCluster2)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Delete(managerTestCluster.ctx, managerMultikueueSecret1)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Delete(managerTestCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) }) ginkgo.It("Should run a job on worker if admitted", func() { job := testingjob.MakeJob("job", managerNs.Name). Queue(managerLq.Name). 
Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, job)).Should(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, job)).Should(gomega.Succeed()) createdWorkload := &kueue.Workload{} wlLookupKey := types.NamespacedName{Name: workloadjob.GetWorkloadNameForJob(job.Name), Namespace: managerNs.Name} @@ -183,18 +183,18 @@ var _ = ginkgo.Describe("Multikueue", func() { ginkgo.By("setting workload reservation in the management cluster", func() { admission := utiltesting.MakeAdmission(managerCq.Name).Obj() gomega.Eventually(func(g gomega.Gomega) { - g.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) - g.Expect(util.SetQuotaReservation(managerCluster.ctx, managerCluster.client, createdWorkload, admission)).To(gomega.Succeed()) + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(util.SetQuotaReservation(managerTestCluster.ctx, managerTestCluster.client, createdWorkload, admission)).To(gomega.Succeed()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) }) ginkgo.By("checking the workload creation in the worker clusters", func() { managerWl := &kueue.Workload{} - gomega.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, managerWl)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, managerWl)).To(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(worker1Cluster.client.Get(worker1Cluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(worker1TestCluster.client.Get(worker1TestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) g.Expect(createdWorkload.Spec).To(gomega.BeComparableTo(managerWl.Spec)) - g.Expect(worker2Cluster.client.Get(worker2Cluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(worker2TestCluster.client.Get(worker2TestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) g.Expect(createdWorkload.Spec).To(gomega.BeComparableTo(managerWl.Spec)) }, util.Timeout, util.Interval).Should(gomega.Succeed()) }) @@ -203,12 +203,12 @@ var _ = ginkgo.Describe("Multikueue", func() { admission := utiltesting.MakeAdmission(managerCq.Name).Obj() gomega.Eventually(func(g gomega.Gomega) { - g.Expect(worker1Cluster.client.Get(worker1Cluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) - g.Expect(util.SetQuotaReservation(worker1Cluster.ctx, worker1Cluster.client, createdWorkload, admission)).To(gomega.Succeed()) + g.Expect(worker1TestCluster.client.Get(worker1TestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(util.SetQuotaReservation(worker1TestCluster.ctx, worker1TestCluster.client, createdWorkload, admission)).To(gomega.Succeed()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) acs := workload.FindAdmissionCheck(createdWorkload.Status.AdmissionChecks, multikueueAC.Name) g.Expect(acs).NotTo(gomega.BeNil()) g.Expect(acs.State).To(gomega.Equal(kueue.CheckStatePending)) @@ -216,25 +216,25 @@ var _ = ginkgo.Describe("Multikueue", func() { }, util.Timeout, util.Interval).Should(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - 
g.Expect(worker2Cluster.client.Get(worker2Cluster.ctx, wlLookupKey, createdWorkload)).To(utiltesting.BeNotFoundError()) + g.Expect(worker2TestCluster.client.Get(worker2TestCluster.ctx, wlLookupKey, createdWorkload)).To(utiltesting.BeNotFoundError()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) }) ginkgo.By("finishing the worker job", func() { gomega.Eventually(func(g gomega.Gomega) { createdJob := batchv1.Job{} - g.Expect(worker1Cluster.client.Get(worker1Cluster.ctx, client.ObjectKeyFromObject(job), &createdJob)).To(gomega.Succeed()) + g.Expect(worker1TestCluster.client.Get(worker1TestCluster.ctx, client.ObjectKeyFromObject(job), &createdJob)).To(gomega.Succeed()) createdJob.Status.Conditions = append(createdJob.Status.Conditions, batchv1.JobCondition{ Type: batchv1.JobComplete, Status: corev1.ConditionTrue, LastProbeTime: metav1.Now(), LastTransitionTime: metav1.Now(), }) - g.Expect(worker1Cluster.client.Status().Update(worker1Cluster.ctx, &createdJob)).To(gomega.Succeed()) + g.Expect(worker1TestCluster.client.Status().Update(worker1TestCluster.ctx, &createdJob)).To(gomega.Succeed()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) g.Expect(apimeta.FindStatusCondition(createdWorkload.Status.Conditions, kueue.WorkloadFinished)).To(gomega.BeComparableTo(&metav1.Condition{ Type: kueue.WorkloadFinished, @@ -246,7 +246,7 @@ var _ = ginkgo.Describe("Multikueue", func() { gomega.Eventually(func(g gomega.Gomega) { createdWorkload := &kueue.Workload{} - g.Expect(worker1Cluster.client.Get(worker1Cluster.ctx, wlLookupKey, createdWorkload)).To(utiltesting.BeNotFoundError()) + g.Expect(worker1TestCluster.client.Get(worker1TestCluster.ctx, wlLookupKey, createdWorkload)).To(utiltesting.BeNotFoundError()) }, util.LongTimeout, util.Interval).Should(gomega.Succeed()) }) @@ -269,7 +269,7 @@ var _ = ginkgo.Describe("Multikueue", func() { }, ). 
Obj() - gomega.Expect(managerCluster.client.Create(managerCluster.ctx, jobSet)).Should(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, jobSet)).Should(gomega.Succeed()) createdWorkload := &kueue.Workload{} wlLookupKey := types.NamespacedName{Name: workloadjobset.GetWorkloadNameForJobSet(jobSet.Name), Namespace: managerNs.Name} @@ -284,30 +284,30 @@ var _ = ginkgo.Describe("Multikueue", func() { ginkgo.By("setting workload reservation in the management cluster", func() { gomega.Eventually(func(g gomega.Gomega) { - g.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) - g.Expect(util.SetQuotaReservation(managerCluster.ctx, managerCluster.client, createdWorkload, admission)).To(gomega.Succeed()) + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(util.SetQuotaReservation(managerTestCluster.ctx, managerTestCluster.client, createdWorkload, admission)).To(gomega.Succeed()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) }) ginkgo.By("checking the workload creation in the worker clusters", func() { managerWl := &kueue.Workload{} - gomega.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, managerWl)).To(gomega.Succeed()) + gomega.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, managerWl)).To(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(worker2Cluster.client.Get(worker2Cluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(worker2TestCluster.client.Get(worker2TestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) g.Expect(createdWorkload.Spec).To(gomega.BeComparableTo(managerWl.Spec)) - g.Expect(worker1Cluster.client.Get(worker1Cluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(worker1TestCluster.client.Get(worker1TestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) g.Expect(createdWorkload.Spec).To(gomega.BeComparableTo(managerWl.Spec)) }, util.Timeout, util.Interval).Should(gomega.Succeed()) }) ginkgo.By("setting workload reservation in worker2, the workload is admitted in manager and worker1 wl is removed", func() { gomega.Eventually(func(g gomega.Gomega) { - g.Expect(worker2Cluster.client.Get(worker2Cluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) - g.Expect(util.SetQuotaReservation(worker2Cluster.ctx, worker2Cluster.client, createdWorkload, admission)).To(gomega.Succeed()) + g.Expect(worker2TestCluster.client.Get(worker2TestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(util.SetQuotaReservation(worker2TestCluster.ctx, worker2TestCluster.client, createdWorkload, admission)).To(gomega.Succeed()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) acs := workload.FindAdmissionCheck(createdWorkload.Status.AdmissionChecks, multikueueAC.Name) g.Expect(acs).NotTo(gomega.BeNil()) g.Expect(acs.State).To(gomega.Equal(kueue.CheckStateReady)) @@ -323,25 +323,25 @@ var _ = ginkgo.Describe("Multikueue", func() { }, util.Timeout, util.Interval).Should(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(worker1Cluster.client.Get(worker1Cluster.ctx, wlLookupKey,
createdWorkload)).To(utiltesting.BeNotFoundError()) + g.Expect(worker1TestCluster.client.Get(worker1TestCluster.ctx, wlLookupKey, createdWorkload)).To(utiltesting.BeNotFoundError()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) }) ginkgo.By("finishing the worker jobSet, the manager's wl is marked as finished and the worker2 wl removed", func() { gomega.Eventually(func(g gomega.Gomega) { createdJobSet := jobset.JobSet{} - g.Expect(worker2Cluster.client.Get(worker2Cluster.ctx, client.ObjectKeyFromObject(jobSet), &createdJobSet)).To(gomega.Succeed()) + g.Expect(worker2TestCluster.client.Get(worker2TestCluster.ctx, client.ObjectKeyFromObject(jobSet), &createdJobSet)).To(gomega.Succeed()) apimeta.SetStatusCondition(&createdJobSet.Status.Conditions, metav1.Condition{ Type: string(jobset.JobSetCompleted), Status: metav1.ConditionTrue, Reason: "ByTest", Message: "by test", }) - g.Expect(worker2Cluster.client.Status().Update(worker2Cluster.ctx, &createdJobSet)).To(gomega.Succeed()) + g.Expect(worker2TestCluster.client.Status().Update(worker2TestCluster.ctx, &createdJobSet)).To(gomega.Succeed()) }, util.Timeout, util.Interval).Should(gomega.Succeed()) gomega.Eventually(func(g gomega.Gomega) { - g.Expect(managerCluster.client.Get(managerCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed()) g.Expect(apimeta.FindStatusCondition(createdWorkload.Status.Conditions, kueue.WorkloadFinished)).To(gomega.BeComparableTo(&metav1.Condition{ Type: kueue.WorkloadFinished, @@ -353,7 +353,7 @@ var _ = ginkgo.Describe("Multikueue", func() { gomega.Eventually(func(g gomega.Gomega) { createdWorkload := &kueue.Workload{} - g.Expect(worker2Cluster.client.Get(worker2Cluster.ctx, wlLookupKey, createdWorkload)).To(utiltesting.BeNotFoundError()) + g.Expect(worker2TestCluster.client.Get(worker2TestCluster.ctx, wlLookupKey, createdWorkload)).To(utiltesting.BeNotFoundError()) }, util.LongTimeout, util.Interval).Should(gomega.Succeed()) }) diff --git a/test/integration/multikueue/suite_test.go b/test/integration/multikueue/suite_test.go index c362339950..a614e3dc0a 100644 --- a/test/integration/multikueue/suite_test.go +++ b/test/integration/multikueue/suite_test.go @@ -80,9 +80,9 @@ func (c *cluster) kubeConfigBytes() ([]byte, error) { } var ( - managerCluster cluster - worker1Cluster cluster - worker2Cluster cluster + managerTestCluster cluster + worker1TestCluster cluster + worker2TestCluster cluster managersConfigNamespace *corev1.Namespace ) @@ -107,15 +107,15 @@ func createCluster(setupFnc framework.ManagerSetup) cluster { } var _ = ginkgo.BeforeSuite(func() { - managerCluster = createCluster(managerAndMultiKueueSetup) - worker1Cluster = createCluster(managerSetup) - worker2Cluster = createCluster(managerSetup) + managerTestCluster = createCluster(managerAndMultiKueueSetup) + worker1TestCluster = createCluster(managerSetup) + worker2TestCluster = createCluster(managerSetup) }) var _ = ginkgo.AfterSuite(func() { - managerCluster.fwk.Teardown() - worker1Cluster.fwk.Teardown() - worker2Cluster.fwk.Teardown() + managerTestCluster.fwk.Teardown() + worker1TestCluster.fwk.Teardown() + worker2TestCluster.fwk.Teardown() }) func managerSetup(mgr manager.Manager, ctx context.Context) { From 6ca7fff3d82891a0b3c42dcf489f9a1c382c6185 Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Fri, 26 Jan 2024 12:22:30 +0200 Subject: [PATCH 6/9] Review Remarks --- ...eueconfig_types.go => multikueue_types.go} | 
10 +- .../kueue.x-k8s.io_multikueueclusters.yaml | 5 +- .../crd/kueue.x-k8s.io_multikueueconfigs.yaml | 2 +- .../kueue.x-k8s.io_multikueueclusters.yaml | 5 +- .../kueue.x-k8s.io_multikueueconfigs.yaml | 2 +- .../multikueue/admissioncheck.go | 22 ++- .../multikueue/admissioncheck_test.go | 20 +-- .../admissionchecks/multikueue/indexer.go | 6 +- .../multikueue/indexer_test.go | 16 +-- .../multikueue/jobset_adapter_test.go | 5 +- .../multikueue/multikueuecluster.go | 45 +++--- .../multikueue/multikueuecluster_test.go | 20 +-- .../multikueue/workload_test.go | 5 +- pkg/util/testing/wrappers.go | 2 +- test/e2e/multikueue/e2e_test.go | 6 +- .../integration/multikueue/multikueue_test.go | 135 +++++++++++++++++- test/integration/multikueue/suite_test.go | 7 +- 17 files changed, 233 insertions(+), 80 deletions(-) rename apis/kueue/v1alpha1/{multikueueconfig_types.go => multikueue_types.go} (92%) diff --git a/apis/kueue/v1alpha1/multikueueconfig_types.go b/apis/kueue/v1alpha1/multikueue_types.go similarity index 92% rename from apis/kueue/v1alpha1/multikueueconfig_types.go rename to apis/kueue/v1alpha1/multikueue_types.go index 4d554c4f7a..b421ade631 100644 --- a/apis/kueue/v1alpha1/multikueueconfig_types.go +++ b/apis/kueue/v1alpha1/multikueue_types.go @@ -22,10 +22,7 @@ import ( const ( MultiKueueConfigSecretKey = "kubeconfig" -) - -const ( - MultiKueueClusterActive = "Active" + MultiKueueClusterActive = "Active" ) type LocationType string @@ -41,6 +38,9 @@ type KubeConfig struct { Name string `json:"name"` // Location of the KubeConfig. + // + // If LocationType is Secret then Location is the name of the secret inside the namespace in + // which the kueue controller manager is running. The config should be stored in the "kubeconfig" key. Location string `json:"location"` // Type of the KubeConfig location. @@ -95,7 +95,7 @@ type MultiKueueConfigSpec struct { // // +listType=set // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:MaxItems=10 Clusters []string `json:"clusters"` } diff --git a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml index 1d0ae133c5..9a67db9391 100644 --- a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml +++ b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml @@ -50,7 +50,10 @@ spec: description: Information how to connect to the cluster. properties: location: - description: Location of the KubeConfig. + description: "Location of the KubeConfig. \n If LocationType is + Secret then Location is the name of the secret inside the namespace + in which the kueue controller manager is running. The config + should be stored in the \"kubeconfig\" key." type: string locationType: default: Secret diff --git a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml index d457faec3a..8f39877602 100644 --- a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml +++ b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueconfigs.yaml @@ -52,7 +52,7 @@ spec: from the ClusterQueue should be distributed. 
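For illustration only (these manifests are not part of the patch): with the API above, a worker cluster is wired up by storing its kubeconfig in a Secret in the namespace where the kueue controller manager runs, pointing a MultiKueueCluster at that Secret, and listing the cluster name in a MultiKueueConfig. All object names and the "kueue-system" namespace below are placeholder assumptions; only the "kubeconfig" data key and the field names come from the API changes in this series.

apiVersion: v1
kind: Secret
metadata:
  name: worker1-kubeconfig
  namespace: kueue-system          # assumed controller-manager namespace
data:
  kubeconfig: <base64-encoded kubeconfig of the worker cluster>
---
apiVersion: kueue.x-k8s.io/v1alpha1
kind: MultiKueueCluster
metadata:
  name: worker1
spec:
  kubeConfig:
    name: worker1
    location: worker1-kubeconfig   # Secret name, since locationType is Secret
    locationType: Secret
---
apiVersion: kueue.x-k8s.io/v1alpha1
kind: MultiKueueConfig
metadata:
  name: multikueueconfig
spec:
  clusters:
  - worker1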
items: type: string - maxItems: 100 + maxItems: 10 minItems: 1 type: array x-kubernetes-list-type: set diff --git a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml index bf911a066c..edd4ce793d 100644 --- a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml +++ b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml @@ -37,7 +37,10 @@ spec: description: Information how to connect to the cluster. properties: location: - description: Location of the KubeConfig. + description: "Location of the KubeConfig. \n If LocationType is + Secret then Location is the name of the secret inside the namespace + in which the kueue controller manager is running. The config + should be stored in the \"kubeconfig\" key." type: string locationType: default: Secret diff --git a/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml b/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml index 62b641e14f..cb4189a45a 100644 --- a/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml +++ b/config/components/crd/bases/kueue.x-k8s.io_multikueueconfigs.yaml @@ -39,7 +39,7 @@ spec: from the ClusterQueue should be distributed. items: type: string - maxItems: 100 + maxItems: 10 minItems: 1 type: array x-kubernetes-list-type: set diff --git a/pkg/controller/admissionchecks/multikueue/admissioncheck.go b/pkg/controller/admissionchecks/multikueue/admissioncheck.go index cbfe41d1db..41d3459f99 100644 --- a/pkg/controller/admissionchecks/multikueue/admissioncheck.go +++ b/pkg/controller/admissionchecks/multikueue/admissioncheck.go @@ -115,7 +115,7 @@ func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re } oldCondition := apimeta.FindStatusCondition(ac.Status.Conditions, kueue.AdmissionCheckActive) - if oldCondition == nil || oldCondition.Status != newCondition.Status || oldCondition.Reason != newCondition.Reason || oldCondition.Message != newCondition.Message { + if !cmpConditionState(oldCondition, &newCondition) { apimeta.SetStatusCondition(&ac.Status.Conditions, newCondition) err := a.client.Status().Update(ctx, ac) if err != nil { @@ -225,13 +225,13 @@ type mkClusterHandler struct { var _ handler.EventHandler = (*mkClusterHandler)(nil) func (m *mkClusterHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) { - mkc, isMKC := event.Object.(*kueuealpha.MultiKueueConfig) + mkc, isMKC := event.Object.(*kueuealpha.MultiKueueCluster) if !isMKC { return } if err := queueReconcileForConfigUsers(ctx, mkc.Name, m.client, q); err != nil { - ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on create event", "multiKueueConfig", klog.KObj(mkc)) + ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on create event", "multiKueueCluster", klog.KObj(mkc)) } } @@ -242,9 +242,9 @@ func (m *mkClusterHandler) Update(ctx context.Context, event event.UpdateEvent, return } - oldActive := apimeta.IsStatusConditionTrue(oldMKC.Status.Conditions, kueuealpha.MultiKueueClusterActive) - newActive := apimeta.IsStatusConditionTrue(newMKC.Status.Conditions, kueuealpha.MultiKueueClusterActive) - if oldActive != newActive { + oldActive := apimeta.FindStatusCondition(oldMKC.Status.Conditions, kueuealpha.MultiKueueClusterActive) + newActive := apimeta.FindStatusCondition(newMKC.Status.Conditions, kueuealpha.MultiKueueClusterActive) + if !cmpConditionState(oldActive, newActive) { if err := m.queue(ctx, newMKC, q); err != nil { 
ctrl.LoggerFrom(ctx).V(2).Error(err, "Failure on update event", "multiKueueCluster", klog.KObj(oldMKC)) } @@ -286,3 +286,13 @@ func (m *mkClusterHandler) queue(ctx context.Context, cluster *kueuealpha.MultiK } return nil } + +func cmpConditionState(a, b *metav1.Condition) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Status == b.Status && a.Reason == b.Reason && a.Message == b.Message +} diff --git a/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go b/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go index 564ceb3d0d..afaf2832fd 100644 --- a/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go +++ b/pkg/controller/admissionchecks/multikueue/admissioncheck_test.go @@ -48,13 +48,13 @@ func TestReconcile(t *testing.T) { checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Obj(), }, wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Condition(metav1.Condition{ Type: kueue.AdmissionCheckActive, Status: metav1.ConditionFalse, @@ -82,7 +82,7 @@ func TestReconcile(t *testing.T) { checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Obj(), }, configs: []kueuealpha.MultiKueueConfig{ @@ -91,7 +91,7 @@ func TestReconcile(t *testing.T) { wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Condition(metav1.Condition{ Type: kueue.AdmissionCheckActive, Status: metav1.ConditionFalse, @@ -106,7 +106,7 @@ func TestReconcile(t *testing.T) { checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Obj(), }, configs: []kueuealpha.MultiKueueConfig{ @@ -119,7 +119,7 @@ func TestReconcile(t *testing.T) { wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Condition(metav1.Condition{ Type: kueue.AdmissionCheckActive, Status: metav1.ConditionFalse, @@ -134,7 +134,7 @@ func TestReconcile(t *testing.T) { checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Obj(), }, configs: []kueuealpha.MultiKueueConfig{ @@ -148,7 +148,7 @@ func TestReconcile(t *testing.T) { wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). 
+ Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Condition(metav1.Condition{ Type: kueue.AdmissionCheckActive, Status: metav1.ConditionFalse, @@ -163,7 +163,7 @@ func TestReconcile(t *testing.T) { checks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Obj(), }, configs: []kueuealpha.MultiKueueConfig{ @@ -175,7 +175,7 @@ func TestReconcile(t *testing.T) { wantChecks: []kueue.AdmissionCheck{ *utiltesting.MakeAdmissionCheck("ac1"). ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). Condition(metav1.Condition{ Type: kueue.AdmissionCheckActive, Status: metav1.ConditionTrue, diff --git a/pkg/controller/admissionchecks/multikueue/indexer.go b/pkg/controller/admissionchecks/multikueue/indexer.go index 3ff9d2fb63..89786ce2a4 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer.go +++ b/pkg/controller/admissionchecks/multikueue/indexer.go @@ -35,7 +35,7 @@ const ( ) var ( - configGVK = kueue.GroupVersion.WithKind("MultiKueueConfig") + configGVK = kueuealpha.GroupVersion.WithKind("MultiKueueConfig") ) func getIndexUsingKubeConfigs(configNamespace string) func(obj client.Object) []string { @@ -58,10 +58,10 @@ func indexUsingMultiKueueClusters(obj client.Object) []string { func SetupIndexer(ctx context.Context, indexer client.FieldIndexer, configNamespace string) error { if err := indexer.IndexField(ctx, &kueuealpha.MultiKueueCluster{}, UsingKubeConfigs, getIndexUsingKubeConfigs(configNamespace)); err != nil { - return fmt.Errorf("setting index on clusters config used kubeconfig: %w", err) + return fmt.Errorf("setting index on clusters using kubeconfig: %w", err) } if err := indexer.IndexField(ctx, &kueuealpha.MultiKueueConfig{}, UsingMultiKueueClusters, indexUsingMultiKueueClusters); err != nil { - return fmt.Errorf("setting index on checks config used clusters: %w", err) + return fmt.Errorf("setting index on configs using clusters: %w", err) } if err := indexer.IndexField(ctx, &kueue.AdmissionCheck{}, AdmissionCheckUsingConfigKey, admissioncheck.IndexerByConfigFunction(ControllerName, configGVK)); err != nil { return fmt.Errorf("setting index on admission checks config: %w", err) diff --git a/pkg/controller/admissionchecks/multikueue/indexer_test.go b/pkg/controller/admissionchecks/multikueue/indexer_test.go index 14f556b9e1..b886c0cda0 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer_test.go +++ b/pkg/controller/admissionchecks/multikueue/indexer_test.go @@ -65,7 +65,7 @@ func getClientBuilder() (*fake.ClientBuilder, context.Context) { return builder, ctx } -func TestListMultikueClustersUsingKubeconfig(t *testing.T) { +func TestListMultiKueueClustersUsingKubeConfig(t *testing.T) { cases := map[string]struct { clusters []*kueuealpha.MultiKueueCluster filter client.ListOption @@ -77,21 +77,21 @@ func TestListMultikueClustersUsingKubeconfig(t *testing.T) { }, "single cluster, single match": { clusters: []*kueuealpha.MultiKueueCluster{ - utiltesting.MakeMultiKueueCluster("cluster1").Secret("", "secret1").Obj(), + utiltesting.MakeMultiKueueCluster("cluster1").KubeConfig("", "secret1").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, wantList: []string{"cluster1"}, }, "single cluster, no match": { 
clusters: []*kueuealpha.MultiKueueCluster{ - utiltesting.MakeMultiKueueCluster("cluster2").Secret("", "secret2").Obj(), + utiltesting.MakeMultiKueueCluster("cluster2").KubeConfig("", "secret2").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, }, "multiple clusters, single match": { clusters: []*kueuealpha.MultiKueueCluster{ - utiltesting.MakeMultiKueueCluster("cluster1").Secret("", "secret1").Obj(), - utiltesting.MakeMultiKueueCluster("cluster2").Secret("", "secret2").Obj(), + utiltesting.MakeMultiKueueCluster("cluster1").KubeConfig("", "secret1").Obj(), + utiltesting.MakeMultiKueueCluster("cluster2").KubeConfig("", "secret2").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, wantList: []string{"cluster1"}, @@ -103,7 +103,7 @@ func TestListMultikueClustersUsingKubeconfig(t *testing.T) { k8sclient := builder.Build() for _, req := range tc.clusters { if err := k8sclient.Create(ctx, req); err != nil { - t.Fatalf("Unable to create %s cluster: %v", client.ObjectKeyFromObject(req), err) + t.Fatalf("Unable to create %q cluster: %v", client.ObjectKeyFromObject(req), err) } } @@ -122,7 +122,7 @@ func TestListMultikueClustersUsingKubeconfig(t *testing.T) { } } -func TestListMultikueConfigsUsingMultikueueClusters(t *testing.T) { +func TestListMultiKueueConfigsUsingMultiKueueClusters(t *testing.T) { cases := map[string]struct { configs []*kueuealpha.MultiKueueConfig filter client.ListOption @@ -160,7 +160,7 @@ func TestListMultikueConfigsUsingMultikueueClusters(t *testing.T) { k8sclient := builder.Build() for _, config := range tc.configs { if err := k8sclient.Create(ctx, config); err != nil { - t.Fatalf("Unable to create %s config: %v", client.ObjectKeyFromObject(config), err) + t.Fatalf("Unable to create %q config: %v", client.ObjectKeyFromObject(config), err) } } diff --git a/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go b/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go index 90811b29cf..5d72f6e474 100644 --- a/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go +++ b/pkg/controller/admissionchecks/multikueue/jobset_adapter_test.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" jobset "sigs.k8s.io/jobset/api/jobset/v1alpha2" + kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1" kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" "sigs.k8s.io/kueue/pkg/controller/constants" "sigs.k8s.io/kueue/pkg/util/slices" @@ -226,7 +227,7 @@ func TestWlReconcileJobset(t *testing.T) { manageBuilder = manageBuilder.WithObjects( utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1").Obj(), utiltesting.MakeAdmissionCheck("ac1").ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). 
Obj(), ) @@ -241,7 +242,7 @@ func TestWlReconcileJobset(t *testing.T) { w1remoteClient := newRemoteClient(managerClient, nil) w1remoteClient.client = worker1Client - cRec.clients["worker1"] = w1remoteClient + cRec.remoteClients["worker1"] = w1remoteClient helper, _ := newMultiKueueStoreHelper(managerClient) reconciler := newWlReconciler(managerClient, helper, cRec) diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go index 29e74154bb..60f272c2db 100644 --- a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go @@ -121,23 +121,24 @@ func (rc *remoteClient) queueWorkloadEvent(ctx context.Context, ev watch.Event) localWl := &kueue.Workload{} if err := rc.localClient.Get(ctx, client.ObjectKeyFromObject(wl), localWl); err == nil { + rc.wlUpdateCh <- event.GenericEvent{Object: localWl} + } else { if !apierrors.IsNotFound(err) { ctrl.LoggerFrom(ctx).Error(err, "reading local workload") } - rc.wlUpdateCh <- event.GenericEvent{Object: localWl} } } // clustersReconciler implements the reconciler for all MultiKueueClusters. // Its main task being to maintain the list of remote clients associated to each MultiKueueCluster. type clustersReconciler struct { - client client.Client + localClient client.Client configNamespace string lock sync.RWMutex - // The list of remote clients, indexed by the cluster name. - clients map[string]*remoteClient - wlUpdateCh chan event.GenericEvent + // The list of remote remoteClients, indexed by the cluster name. + remoteClients map[string]*remoteClient + wlUpdateCh chan event.GenericEvent // rootContext - holds the context passed by the controller-runtime on Start. 
// It's used to create child contexts for MultiKueueClusters client watch routines @@ -161,9 +162,9 @@ func (c *clustersReconciler) Start(ctx context.Context) error { func (c *clustersReconciler) stopAndRemoveCluster(clusterName string) { c.lock.Lock() defer c.lock.Unlock() - if rc, found := c.clients[clusterName]; found { + if rc, found := c.remoteClients[clusterName]; found { rc.watchCancel() - delete(c.clients, clusterName) + delete(c.remoteClients, clusterName) } } @@ -171,18 +172,18 @@ func (c *clustersReconciler) setRemoteClientConfig(ctx context.Context, clusterN c.lock.Lock() defer c.lock.Unlock() - client, found := c.clients[clusterName] + client, found := c.remoteClients[clusterName] if !found { - client = newRemoteClient(c.client, c.wlUpdateCh) + client = newRemoteClient(c.localClient, c.wlUpdateCh) if c.builderOverride != nil { client.builderOverride = c.builderOverride } - c.clients[clusterName] = client + c.remoteClients[clusterName] = client } if err := client.setConfig(c.rootContext, kubeconfig); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "reading local workload") - delete(c.clients, clusterName) + ctrl.LoggerFrom(ctx).Error(err, "failed to set kubeConfig in the remote client") + delete(c.remoteClients, clusterName) return err } return nil @@ -192,7 +193,7 @@ func (a *clustersReconciler) controllerFor(acName string) (*remoteClient, bool) a.lock.RLock() defer a.lock.RUnlock() - c, f := a.clients[acName] + c, f := a.remoteClients[acName] return c, f } @@ -200,11 +201,13 @@ func (c *clustersReconciler) Reconcile(ctx context.Context, req reconcile.Reques cluster := &kueuealpha.MultiKueueCluster{} log := ctrl.LoggerFrom(ctx) - err := c.client.Get(ctx, req.NamespacedName, cluster) + err := c.localClient.Get(ctx, req.NamespacedName, cluster) if client.IgnoreNotFound(err) != nil { return reconcile.Result{}, err } + log.V(2).Info("Reconcile MultiKueueCluster") + if err != nil || !cluster.DeletionTimestamp.IsZero() { c.stopAndRemoveCluster(req.Name) return reconcile.Result{}, nil @@ -233,7 +236,7 @@ func (c *clustersReconciler) getKubeConfig(ctx context.Context, ref *kueuealpha. 
Namespace: c.configNamespace, Name: ref.Location, } - err := c.client.Get(ctx, secretObjKey, &sec) + err := c.localClient.Get(ctx, secretObjKey, &sec) if err != nil { return nil, !apierrors.IsNotFound(err), err } @@ -257,14 +260,14 @@ func (c *clustersReconciler) updateStatus(ctx context.Context, cluster *kueuealp newCondition.Status = metav1.ConditionTrue } - // if the cluster is connected and remains that way, skip the status update + // if the condition is up to date oldCondition := apimeta.FindStatusCondition(cluster.Status.Conditions, kueuealpha.MultiKueueClusterActive) - if active && oldCondition != nil && oldCondition.Status == metav1.ConditionTrue { + if cmpConditionState(oldCondition, &newCondition) { return nil } apimeta.SetStatusCondition(&cluster.Status.Conditions, newCondition) - return c.client.Status().Update(ctx, cluster) + return c.localClient.Status().Update(ctx, cluster) } // +kubebuilder:rbac:groups="",resources=events,verbs=create;watch;update @@ -273,9 +276,9 @@ func (c *clustersReconciler) updateStatus(ctx context.Context, cluster *kueuealp func newClustersReconciler(c client.Client, namespace string) *clustersReconciler { return &clustersReconciler{ - client: c, + localClient: c, configNamespace: namespace, - clients: make(map[string]*remoteClient), + remoteClients: make(map[string]*remoteClient), wlUpdateCh: make(chan event.GenericEvent, 10), } } @@ -288,7 +291,7 @@ func (c *clustersReconciler) setupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&kueuealpha.MultiKueueCluster{}). - Watches(&corev1.Secret{}, &secretHandler{client: c.client}). + Watches(&corev1.Secret{}, &secretHandler{client: c.localClient}). Complete(c) } diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go index 08130316f8..3550e1bbea 100644 --- a/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go @@ -85,13 +85,13 @@ func TestUpdateConfig(t *testing.T) { "new valid client is added": { reconcileFor: "worker1", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), }, secrets: []corev1.Secret{ makeTestSecret("worker1", "worker1 kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), }, wantRemoteClients: map[string]*remoteClient{ "worker1": { @@ -102,7 +102,7 @@ func TestUpdateConfig(t *testing.T) { "update client with valid config": { reconcileFor: "worker1", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), }, secrets: []corev1.Secret{ makeTestSecret("worker1", "worker1 kubeconfig"), @@ -111,7 +111,7 @@ func TestUpdateConfig(t *testing.T) { "worker1": newTestClient("worker1 old kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), + 
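For reference (illustrative, not introduced verbatim by the patch): updateStatus above maintains the Active condition on each MultiKueueCluster. On a successful connection it is set to True with reason Active and message Connected; on failure the reason is BadConfig (the kubeconfig could not be read) or ClientConnectionFailed, with the error text as the message. A healthy cluster status would therefore look roughly like:

status:
  conditions:
  - type: Active
    status: "True"
    reason: Active
    message: Connected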
*utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), }, wantRemoteClients: map[string]*remoteClient{ "worker1": { @@ -122,7 +122,7 @@ func TestUpdateConfig(t *testing.T) { "update client with invalid config": { reconcileFor: "worker1", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), }, secrets: []corev1.Secret{ makeTestSecret("worker1", "invalid"), @@ -131,20 +131,20 @@ func TestUpdateConfig(t *testing.T) { "worker1": newTestClient("worker1 old kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Active(metav1.ConditionFalse, "ClientConnectionFailed", "invalid kubeconfig").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Active(metav1.ConditionFalse, "ClientConnectionFailed", "invalid kubeconfig").Obj(), }, }, "missing cluster is removed": { reconcileFor: "worker2", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), }, remoteClients: map[string]*remoteClient{ "worker1": newTestClient("worker1 kubeconfig"), "worker2": newTestClient("worker2 kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").Secret("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), }, wantRemoteClients: map[string]*remoteClient{ "worker1": { @@ -165,7 +165,7 @@ func TestUpdateConfig(t *testing.T) { reconciler := newClustersReconciler(c, TestNamespace) if len(tc.remoteClients) > 0 { - reconciler.clients = tc.remoteClients + reconciler.remoteClients = tc.remoteClients } reconciler.builderOverride = fakeClientBuilder @@ -186,7 +186,7 @@ func TestUpdateConfig(t *testing.T) { t.Errorf("unexpected clusters (-want/+got):\n%s", diff) } - if diff := cmp.Diff(tc.wantRemoteClients, reconciler.clients, cmpopts.EquateEmpty(), + if diff := cmp.Diff(tc.wantRemoteClients, reconciler.remoteClients, cmpopts.EquateEmpty(), cmp.Comparer(func(a, b remoteClient) bool { return string(a.kubeconfig) == string(b.kubeconfig) })); diff != "" { t.Errorf("unexpected controllers (-want/+got):\n%s", diff) } diff --git a/pkg/controller/admissionchecks/multikueue/workload_test.go b/pkg/controller/admissionchecks/multikueue/workload_test.go index 9c9829a1dd..70744d1a86 100644 --- a/pkg/controller/admissionchecks/multikueue/workload_test.go +++ b/pkg/controller/admissionchecks/multikueue/workload_test.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1" kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1" "sigs.k8s.io/kueue/pkg/controller/constants" "sigs.k8s.io/kueue/pkg/util/slices" @@ -294,7 +295,7 @@ func TestWlReconcile(t *testing.T) { manageBuilder = manageBuilder.WithObjects( utiltesting.MakeMultiKueueConfig("config1").Clusters("worker1").Obj(), utiltesting.MakeAdmissionCheck("ac1").ControllerName(ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", "config1"). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "config1"). 
Obj(), ) @@ -308,7 +309,7 @@ func TestWlReconcile(t *testing.T) { w1remoteClient := newRemoteClient(managerClient, nil) w1remoteClient.client = worker1Client - cRec.clients["worker1"] = w1remoteClient + cRec.remoteClients["worker1"] = w1remoteClient helper, _ := newMultiKueueStoreHelper(managerClient) reconciler := newWlReconciler(managerClient, helper, cRec) diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go index 20c11fc81b..7c51fa0f6f 100644 --- a/pkg/util/testing/wrappers.go +++ b/pkg/util/testing/wrappers.go @@ -789,7 +789,7 @@ func (mkc *MultiKueueClusterWrapper) Obj() *kueuealpha.MultiKueueCluster { return &mkc.MultiKueueCluster } -func (mkc *MultiKueueClusterWrapper) Secret(name, secretName string) *MultiKueueClusterWrapper { +func (mkc *MultiKueueClusterWrapper) KubeConfig(name, secretName string) *MultiKueueClusterWrapper { mkc.Spec.KubeConfig = kueuealpha.KubeConfig{ Name: name, Location: secretName, diff --git a/test/e2e/multikueue/e2e_test.go b/test/e2e/multikueue/e2e_test.go index f64716b317..af70591114 100644 --- a/test/e2e/multikueue/e2e_test.go +++ b/test/e2e/multikueue/e2e_test.go @@ -88,10 +88,10 @@ var _ = ginkgo.Describe("MultiKueue", func() { } gomega.Expect(k8sWorker2Client.Create(ctx, worker2Ns)).To(gomega.Succeed()) - workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", "multikueue1").Obj() + workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1", "multikueue1").Obj() gomega.Expect(k8sManagerClient.Create(ctx, workerCluster1)).To(gomega.Succeed()) - workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", "multikueue2").Obj() + workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").KubeConfig("worker2", "multikueue2").Obj() gomega.Expect(k8sManagerClient.Create(ctx, workerCluster2)).To(gomega.Succeed()) multiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters("worker1", "worker2").Obj() @@ -99,7 +99,7 @@ var _ = ginkgo.Describe("MultiKueue", func() { multiKueueAc = utiltesting.MakeAdmissionCheck("ac1"). ControllerName(multikueue.ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", multiKueueConfig.Name). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", multiKueueConfig.Name). 
Obj() gomega.Expect(k8sManagerClient.Create(ctx, multiKueueAc)).Should(gomega.Succeed()) diff --git a/test/integration/multikueue/multikueue_test.go b/test/integration/multikueue/multikueue_test.go index e4d537d1bf..ce0d087fd8 100644 --- a/test/integration/multikueue/multikueue_test.go +++ b/test/integration/multikueue/multikueue_test.go @@ -111,10 +111,10 @@ var _ = ginkgo.Describe("Multikueue", func() { } gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) - workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").Secret("worker1", managerMultikueueSecret1.Name).Obj() + workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1", managerMultikueueSecret1.Name).Obj() gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, workerCluster1)).To(gomega.Succeed()) - workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").Secret("worker2", managerMultikueueSecret2.Name).Obj() + workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").KubeConfig("worker2", managerMultikueueSecret2.Name).Obj() gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, workerCluster2)).To(gomega.Succeed()) managerMultiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters(workerCluster1.Name, workerCluster2.Name).Obj() @@ -122,7 +122,7 @@ var _ = ginkgo.Describe("Multikueue", func() { multikueueAC = utiltesting.MakeAdmissionCheck("ac1"). ControllerName(multikueue.ControllerName). - Parameters(kueue.GroupVersion.Group, "MultiKueueConfig", managerMultiKueueConfig.Name). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", managerMultiKueueConfig.Name). Obj() gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, multikueueAC)).Should(gomega.Succeed()) @@ -170,6 +170,135 @@ var _ = ginkgo.Describe("Multikueue", func() { gomega.Expect(managerTestCluster.client.Delete(managerTestCluster.ctx, managerMultikueueSecret1)).To(gomega.Succeed()) gomega.Expect(managerTestCluster.client.Delete(managerTestCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) }) + ginkgo.It("Should properly manage the active condition of AdmissionChecks and MultiKueueClusters", func() { + ac := utiltesting.MakeAdmissionCheck("testing-ac"). + ControllerName(multikueue.ControllerName). + Parameters(kueuealpha.GroupVersion.Group, "MultiKueueConfig", "testing-config"). 
+ Obj() + ginkgo.By("creating the admission check with missing config, it's set inactive", func() { + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, ac)).Should(gomega.Succeed()) + ginkgo.DeferCleanup(func() error { return managerTestCluster.client.Delete(managerTestCluster.ctx, ac) }) + + ginkgo.By("wait for the check's active state update", func() { + updatetedAc := kueue.AdmissionCheck{} + acKey := client.ObjectKeyFromObject(ac) + gomega.Eventually(func(g gomega.Gomega) { + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, acKey, &updatetedAc)).To(gomega.Succeed()) + g.Expect(updatetedAc.Status.Conditions).To(gomega.ContainElement(gomega.BeComparableTo(metav1.Condition{ + Type: kueue.AdmissionCheckActive, + Status: metav1.ConditionFalse, + Reason: "Inactive", + Message: `Cannot load the AdmissionChecks parameters: MultiKueueConfig.kueue.x-k8s.io "testing-config" not found`, + }, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))) + }, util.Timeout, util.Interval).Should(gomega.Succeed()) + }) + }) + + ginkgo.By("creating a config with duplicate clusters should fail", func() { + badConfig := utiltesting.MakeMultiKueueConfig("bad-config").Clusters("c1", "c2", "c1").Obj() + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, badConfig).Error()).Should(gomega.Equal( + `MultiKueueConfig.kueue.x-k8s.io "bad-config" is invalid: spec.clusters[2]: Duplicate value: "c1"`)) + }) + + config := utiltesting.MakeMultiKueueConfig("testing-config").Clusters("testing-cluster").Obj() + ginkgo.By("creating the config, the admission check's state is updated", func() { + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, config)).Should(gomega.Succeed()) + ginkgo.DeferCleanup(func() error { return managerTestCluster.client.Delete(managerTestCluster.ctx, config) }) + + ginkgo.By("wait for the check's active state update", func() { + updatetedAc := kueue.AdmissionCheck{} + acKey := client.ObjectKeyFromObject(ac) + gomega.Eventually(func(g gomega.Gomega) { + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, acKey, &updatetedAc)).To(gomega.Succeed()) + g.Expect(updatetedAc.Status.Conditions).To(gomega.ContainElement(gomega.BeComparableTo(metav1.Condition{ + Type: kueue.AdmissionCheckActive, + Status: metav1.ConditionFalse, + Reason: "Inactive", + Message: `Missing clusters: [testing-cluster]`, + }, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))) + }, util.Timeout, util.Interval).Should(gomega.Succeed()) + }) + }) + + cluster := utiltesting.MakeMultiKueueCluster("testing-cluster").KubeConfig("", "testing-secret").Obj() + ginkgo.By("creating the cluster, its Active state is updated, the admission check's state is updated", func() { + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, cluster)).Should(gomega.Succeed()) + ginkgo.DeferCleanup(func() error { return managerTestCluster.client.Delete(managerTestCluster.ctx, cluster) }) + + ginkgo.By("wait for the cluster's active state update", func() { + updatetedCluster := kueuealpha.MultiKueueCluster{} + clusterKey := client.ObjectKeyFromObject(cluster) + gomega.Eventually(func(g gomega.Gomega) { + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, clusterKey, &updatetedCluster)).To(gomega.Succeed()) + g.Expect(updatetedCluster.Status.Conditions).To(gomega.ContainElement(gomega.BeComparableTo(metav1.Condition{ + Type: kueuealpha.MultiKueueClusterActive, + Status: metav1.ConditionFalse, + Reason: "BadConfig", + 
Message: `Secret "testing-secret" not found`, + }, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))) + }, util.Timeout, util.Interval).Should(gomega.Succeed()) + }) + + ginkgo.By("wait for the check's active state update", func() { + updatetedAc := kueue.AdmissionCheck{} + acKey := client.ObjectKeyFromObject(ac) + gomega.Eventually(func(g gomega.Gomega) { + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, acKey, &updatetedAc)).To(gomega.Succeed()) + g.Expect(updatetedAc.Status.Conditions).To(gomega.ContainElement(gomega.BeComparableTo(metav1.Condition{ + Type: kueue.AdmissionCheckActive, + Status: metav1.ConditionFalse, + Reason: "Inactive", + Message: `Inactive clusters: [testing-cluster]`, + }, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))) + }, util.Timeout, util.Interval).Should(gomega.Succeed()) + }) + }) + + w1Kubeconfig, err := worker1TestCluster.kubeConfigBytes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testing-secret", + Namespace: managersConfigNamespace.Name, + }, + Data: map[string][]byte{ + kueuealpha.MultiKueueConfigSecretKey: w1Kubeconfig, + }, + } + + ginkgo.By("creating the secret, the cluster and admission check become active", func() { + gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, secret)).Should(gomega.Succeed()) + ginkgo.DeferCleanup(func() error { return managerTestCluster.client.Delete(managerTestCluster.ctx, secret) }) + + ginkgo.By("wait for the cluster's active state update", func() { + updatetedCluster := kueuealpha.MultiKueueCluster{} + clusterKey := client.ObjectKeyFromObject(cluster) + gomega.Eventually(func(g gomega.Gomega) { + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, clusterKey, &updatetedCluster)).To(gomega.Succeed()) + g.Expect(updatetedCluster.Status.Conditions).To(gomega.ContainElement(gomega.BeComparableTo(metav1.Condition{ + Type: kueuealpha.MultiKueueClusterActive, + Status: metav1.ConditionTrue, + Reason: "Active", + Message: "Connected", + }, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))) + }, util.Timeout, util.Interval).Should(gomega.Succeed()) + }) + + ginkgo.By("wait for the check's active state update", func() { + updatetedAc := kueue.AdmissionCheck{} + acKey := client.ObjectKeyFromObject(ac) + gomega.Eventually(func(g gomega.Gomega) { + g.Expect(managerTestCluster.client.Get(managerTestCluster.ctx, acKey, &updatetedAc)).To(gomega.Succeed()) + g.Expect(updatetedAc.Status.Conditions).To(gomega.ContainElement(gomega.BeComparableTo(metav1.Condition{ + Type: kueue.AdmissionCheckActive, + Status: metav1.ConditionTrue, + Reason: "Active", + Message: "The admission check is active", + }, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")))) + }, util.Timeout, util.Interval).Should(gomega.Succeed()) + }) + }) + }) ginkgo.It("Should run a job on worker if admitted", func() { job := testingjob.MakeJob("job", managerNs.Name). 
diff --git a/test/integration/multikueue/suite_test.go b/test/integration/multikueue/suite_test.go index a614e3dc0a..49f2f255a2 100644 --- a/test/integration/multikueue/suite_test.go +++ b/test/integration/multikueue/suite_test.go @@ -86,7 +86,7 @@ var ( managersConfigNamespace *corev1.Namespace ) -func TestScheduler(t *testing.T) { +func TestMultiKueue(t *testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) ginkgo.RunSpecs(t, @@ -166,6 +166,9 @@ func managerAndMultiKueueSetup(mgr manager.Manager, ctx context.Context) { } gomega.Expect(mgr.GetClient().Create(ctx, managersConfigNamespace)).To(gomega.Succeed()) - err := multikueue.SetupControllers(mgr, managersConfigNamespace.Name) + err := multikueue.SetupIndexer(ctx, mgr.GetFieldIndexer(), managersConfigNamespace.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = multikueue.SetupControllers(mgr, managersConfigNamespace.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } From 0755fb1d7800a25a9618188d447b706d1b6f134b Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Fri, 26 Jan 2024 12:50:53 +0200 Subject: [PATCH 7/9] [API/multikueue] Drop KubeConfig.Name --- apis/kueue/v1alpha1/multikueue_types.go | 3 --- .../crd/kueue.x-k8s.io_multikueueclusters.yaml | 4 ---- .../kueue/v1alpha1/kubeconfig.go | 9 --------- .../bases/kueue.x-k8s.io_multikueueclusters.yaml | 4 ---- keps/693-multikueue/README.md | 3 --- .../admissionchecks/multikueue/indexer_test.go | 8 ++++---- .../multikueue/multikueuecluster_test.go | 16 ++++++++-------- pkg/util/testing/wrappers.go | 3 +-- test/e2e/multikueue/e2e_test.go | 4 ++-- test/integration/multikueue/multikueue_test.go | 6 +++--- 10 files changed, 18 insertions(+), 42 deletions(-) diff --git a/apis/kueue/v1alpha1/multikueue_types.go b/apis/kueue/v1alpha1/multikueue_types.go index b421ade631..8d049d9afe 100644 --- a/apis/kueue/v1alpha1/multikueue_types.go +++ b/apis/kueue/v1alpha1/multikueue_types.go @@ -34,9 +34,6 @@ const ( ) type KubeConfig struct { - // Name of the cluster inside the given KubeConfig. - Name string `json:"name"` - // Location of the KubeConfig. // // If LocationType is Secret then Location is the name of the secret inside the namespace in diff --git a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml index 9a67db9391..eec469abd0 100644 --- a/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml +++ b/charts/kueue/templates/crd/kueue.x-k8s.io_multikueueclusters.yaml @@ -61,13 +61,9 @@ spec: enum: - Secret type: string - name: - description: Name of the cluster inside the given KubeConfig. - type: string required: - location - locationType - - name type: object required: - kubeConfig diff --git a/client-go/applyconfiguration/kueue/v1alpha1/kubeconfig.go b/client-go/applyconfiguration/kueue/v1alpha1/kubeconfig.go index aa492329e5..7a0422c836 100644 --- a/client-go/applyconfiguration/kueue/v1alpha1/kubeconfig.go +++ b/client-go/applyconfiguration/kueue/v1alpha1/kubeconfig.go @@ -24,7 +24,6 @@ import ( // KubeConfigApplyConfiguration represents an declarative configuration of the KubeConfig type for use // with apply. 
type KubeConfigApplyConfiguration struct { - Name *string `json:"name,omitempty"` Location *string `json:"location,omitempty"` LocationType *v1alpha1.LocationType `json:"locationType,omitempty"` } @@ -35,14 +34,6 @@ func KubeConfig() *KubeConfigApplyConfiguration { return &KubeConfigApplyConfiguration{} } -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *KubeConfigApplyConfiguration) WithName(value string) *KubeConfigApplyConfiguration { - b.Name = &value - return b -} - // WithLocation sets the Location field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Location field is set to the value of the last call. diff --git a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml index edd4ce793d..df69b19036 100644 --- a/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml +++ b/config/components/crd/bases/kueue.x-k8s.io_multikueueclusters.yaml @@ -48,13 +48,9 @@ spec: enum: - Secret type: string - name: - description: Name of the cluster inside the given KubeConfig. - type: string required: - location - locationType - - name type: object required: - kubeConfig diff --git a/keps/693-multikueue/README.md b/keps/693-multikueue/README.md index c125a766f1..b9a081878c 100644 --- a/keps/693-multikueue/README.md +++ b/keps/693-multikueue/README.md @@ -174,9 +174,6 @@ type MultiKueueClusterSpec { } type KubeConfig struct { - // Name of the cluster inside the given KubeConfig. - Name string `json:"name"` - // Location of the KubeConfig. 
Location string `json:"location"` diff --git a/pkg/controller/admissionchecks/multikueue/indexer_test.go b/pkg/controller/admissionchecks/multikueue/indexer_test.go index b886c0cda0..cd8f339ad3 100644 --- a/pkg/controller/admissionchecks/multikueue/indexer_test.go +++ b/pkg/controller/admissionchecks/multikueue/indexer_test.go @@ -77,21 +77,21 @@ func TestListMultiKueueClustersUsingKubeConfig(t *testing.T) { }, "single cluster, single match": { clusters: []*kueuealpha.MultiKueueCluster{ - utiltesting.MakeMultiKueueCluster("cluster1").KubeConfig("", "secret1").Obj(), + utiltesting.MakeMultiKueueCluster("cluster1").KubeConfig("secret1").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, wantList: []string{"cluster1"}, }, "single cluster, no match": { clusters: []*kueuealpha.MultiKueueCluster{ - utiltesting.MakeMultiKueueCluster("cluster2").KubeConfig("", "secret2").Obj(), + utiltesting.MakeMultiKueueCluster("cluster2").KubeConfig("secret2").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, }, "multiple clusters, single match": { clusters: []*kueuealpha.MultiKueueCluster{ - utiltesting.MakeMultiKueueCluster("cluster1").KubeConfig("", "secret1").Obj(), - utiltesting.MakeMultiKueueCluster("cluster2").KubeConfig("", "secret2").Obj(), + utiltesting.MakeMultiKueueCluster("cluster1").KubeConfig("secret1").Obj(), + utiltesting.MakeMultiKueueCluster("cluster2").KubeConfig("secret2").Obj(), }, filter: client.MatchingFields{UsingKubeConfigs: TestNamespace + "/secret1"}, wantList: []string{"cluster1"}, diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go index 3550e1bbea..170e6463b1 100644 --- a/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster_test.go @@ -85,13 +85,13 @@ func TestUpdateConfig(t *testing.T) { "new valid client is added": { reconcileFor: "worker1", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Obj(), }, secrets: []corev1.Secret{ makeTestSecret("worker1", "worker1 kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), }, wantRemoteClients: map[string]*remoteClient{ "worker1": { @@ -102,7 +102,7 @@ func TestUpdateConfig(t *testing.T) { "update client with valid config": { reconcileFor: "worker1", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Obj(), }, secrets: []corev1.Secret{ makeTestSecret("worker1", "worker1 kubeconfig"), @@ -111,7 +111,7 @@ func TestUpdateConfig(t *testing.T) { "worker1": newTestClient("worker1 old kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Active(metav1.ConditionTrue, "Active", "Connected").Obj(), }, wantRemoteClients: 
map[string]*remoteClient{ "worker1": { @@ -122,7 +122,7 @@ func TestUpdateConfig(t *testing.T) { "update client with invalid config": { reconcileFor: "worker1", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Obj(), }, secrets: []corev1.Secret{ makeTestSecret("worker1", "invalid"), @@ -131,20 +131,20 @@ func TestUpdateConfig(t *testing.T) { "worker1": newTestClient("worker1 old kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Active(metav1.ConditionFalse, "ClientConnectionFailed", "invalid kubeconfig").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Active(metav1.ConditionFalse, "ClientConnectionFailed", "invalid kubeconfig").Obj(), }, }, "missing cluster is removed": { reconcileFor: "worker2", clusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Obj(), }, remoteClients: map[string]*remoteClient{ "worker1": newTestClient("worker1 kubeconfig"), "worker2": newTestClient("worker2 kubeconfig"), }, wantClusters: []kueuealpha.MultiKueueCluster{ - *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("w1", "worker1").Obj(), + *utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1").Obj(), }, wantRemoteClients: map[string]*remoteClient{ "worker1": { diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go index 7c51fa0f6f..5ae00203de 100644 --- a/pkg/util/testing/wrappers.go +++ b/pkg/util/testing/wrappers.go @@ -789,9 +789,8 @@ func (mkc *MultiKueueClusterWrapper) Obj() *kueuealpha.MultiKueueCluster { return &mkc.MultiKueueCluster } -func (mkc *MultiKueueClusterWrapper) KubeConfig(name, secretName string) *MultiKueueClusterWrapper { +func (mkc *MultiKueueClusterWrapper) KubeConfig(secretName string) *MultiKueueClusterWrapper { mkc.Spec.KubeConfig = kueuealpha.KubeConfig{ - Name: name, Location: secretName, LocationType: kueuealpha.SecretLocationType, } diff --git a/test/e2e/multikueue/e2e_test.go b/test/e2e/multikueue/e2e_test.go index af70591114..c5af0ada1d 100644 --- a/test/e2e/multikueue/e2e_test.go +++ b/test/e2e/multikueue/e2e_test.go @@ -88,10 +88,10 @@ var _ = ginkgo.Describe("MultiKueue", func() { } gomega.Expect(k8sWorker2Client.Create(ctx, worker2Ns)).To(gomega.Succeed()) - workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1", "multikueue1").Obj() + workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("multikueue1").Obj() gomega.Expect(k8sManagerClient.Create(ctx, workerCluster1)).To(gomega.Succeed()) - workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").KubeConfig("worker2", "multikueue2").Obj() + workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").KubeConfig("multikueue2").Obj() gomega.Expect(k8sManagerClient.Create(ctx, workerCluster2)).To(gomega.Succeed()) multiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters("worker1", "worker2").Obj() diff --git a/test/integration/multikueue/multikueue_test.go b/test/integration/multikueue/multikueue_test.go index ce0d087fd8..94fda90c14 100644 --- a/test/integration/multikueue/multikueue_test.go +++ b/test/integration/multikueue/multikueue_test.go @@ -111,10 +111,10 @@ var _ = ginkgo.Describe("Multikueue", func() { } 
gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, managerMultikueueSecret2)).To(gomega.Succeed()) - workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").KubeConfig("worker1", managerMultikueueSecret1.Name).Obj() + workerCluster1 = utiltesting.MakeMultiKueueCluster("worker1").KubeConfig(managerMultikueueSecret1.Name).Obj() gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, workerCluster1)).To(gomega.Succeed()) - workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").KubeConfig("worker2", managerMultikueueSecret2.Name).Obj() + workerCluster2 = utiltesting.MakeMultiKueueCluster("worker2").KubeConfig(managerMultikueueSecret2.Name).Obj() gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, workerCluster2)).To(gomega.Succeed()) managerMultiKueueConfig = utiltesting.MakeMultiKueueConfig("multikueueconfig").Clusters(workerCluster1.Name, workerCluster2.Name).Obj() @@ -220,7 +220,7 @@ var _ = ginkgo.Describe("Multikueue", func() { }) }) - cluster := utiltesting.MakeMultiKueueCluster("testing-cluster").KubeConfig("", "testing-secret").Obj() + cluster := utiltesting.MakeMultiKueueCluster("testing-cluster").KubeConfig("testing-secret").Obj() ginkgo.By("creating the cluster, its Active state is updated, the admission check's state is updated", func() { gomega.Expect(managerTestCluster.client.Create(managerTestCluster.ctx, cluster)).Should(gomega.Succeed()) ginkgo.DeferCleanup(func() error { return managerTestCluster.client.Delete(managerTestCluster.ctx, cluster) }) From 66f5dc4760726f4ecc169f882971562d6b4c2a30 Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Sat, 27 Jan 2024 10:19:17 +0200 Subject: [PATCH 8/9] Review remarks --- .../multikueue/multikueuecluster.go | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go index 60f272c2db..216a4b9578 100644 --- a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go @@ -18,7 +18,6 @@ package multikueue import ( "context" - "errors" "fmt" "strings" "sync" @@ -302,36 +301,47 @@ type secretHandler struct { var _ handler.EventHandler = (*secretHandler)(nil) func (s *secretHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) { - if err := s.queue(ctx, event.Object, q); err != nil { + secret, isSecret := event.Object.(*corev1.Secret) + if !isSecret { + ctrl.LoggerFrom(ctx).V(5).Info("Failure on create event, not a secret") + } + if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on create event", "secret", klog.KObj(event.Object)) } } func (s *secretHandler) Update(ctx context.Context, event event.UpdateEvent, q workqueue.RateLimitingInterface) { - if err := s.queue(ctx, event.ObjectOld, q); err != nil { + secret, isSecret := event.ObjectNew.(*corev1.Secret) + if !isSecret { + ctrl.LoggerFrom(ctx).V(5).Info("Failure on update event, not a secret") + } + if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on update event", "secret", klog.KObj(event.ObjectOld)) } } func (s *secretHandler) Delete(ctx context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) { - if err := s.queue(ctx, event.Object, q); err != nil { + secret, isSecret := event.Object.(*corev1.Secret) + if !isSecret { + 
ctrl.LoggerFrom(ctx).V(5).Info("Failure on delete event, not a secret") + } + if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on delete event", "secret", klog.KObj(event.Object)) } } func (s *secretHandler) Generic(ctx context.Context, event event.GenericEvent, q workqueue.RateLimitingInterface) { - if err := s.queue(ctx, event.Object, q); err != nil { + secret, isSecret := event.Object.(*corev1.Secret) + if !isSecret { + ctrl.LoggerFrom(ctx).V(5).Info("Failure on generic event, not a secret") + } + if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on generic event", "secret", klog.KObj(event.Object)) } } -func (s *secretHandler) queue(ctx context.Context, obj client.Object, q workqueue.RateLimitingInterface) error { +func (s *secretHandler) queue(ctx context.Context, secret *corev1.Secret, q workqueue.RateLimitingInterface) error { users := &kueuealpha.MultiKueueClusterList{} - secret, isSecret := obj.(*corev1.Secret) - if !isSecret { - return errors.New("not a secret") - } - if err := s.client.List(ctx, users, client.MatchingFields{UsingKubeConfigs: strings.Join([]string{secret.Namespace, secret.Name}, "/")}); err != nil { return err } From 1643981cb878cd71c9a7903fe8e4621a55aa3d83 Mon Sep 17 00:00:00 2001 From: Traian Schiau Date: Mon, 29 Jan 2024 08:24:56 +0200 Subject: [PATCH 9/9] Review remarks --- .../admissionchecks/multikueue/multikueuecluster.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go index 216a4b9578..35a5adf7e3 100644 --- a/pkg/controller/admissionchecks/multikueue/multikueuecluster.go +++ b/pkg/controller/admissionchecks/multikueue/multikueuecluster.go @@ -18,6 +18,7 @@ package multikueue import ( "context" + "errors" "fmt" "strings" "sync" @@ -303,7 +304,8 @@ var _ handler.EventHandler = (*secretHandler)(nil) func (s *secretHandler) Create(ctx context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) { secret, isSecret := event.Object.(*corev1.Secret) if !isSecret { - ctrl.LoggerFrom(ctx).V(5).Info("Failure on create event, not a secret") + ctrl.LoggerFrom(ctx).V(5).Error(errors.New("not a secret"), "Failure on create event") + return } if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on create event", "secret", klog.KObj(event.Object)) @@ -313,7 +315,8 @@ func (s *secretHandler) Create(ctx context.Context, event event.CreateEvent, q w func (s *secretHandler) Update(ctx context.Context, event event.UpdateEvent, q workqueue.RateLimitingInterface) { secret, isSecret := event.ObjectNew.(*corev1.Secret) if !isSecret { - ctrl.LoggerFrom(ctx).V(5).Info("Failure on update event, not a secret") + ctrl.LoggerFrom(ctx).V(5).Error(errors.New("not a secret"), "Failure on update event") + return } if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on update event", "secret", klog.KObj(event.ObjectOld)) @@ -323,7 +326,8 @@ func (s *secretHandler) Update(ctx context.Context, event event.UpdateEvent, q w func (s *secretHandler) Delete(ctx context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) { secret, isSecret := event.Object.(*corev1.Secret) if !isSecret { - ctrl.LoggerFrom(ctx).V(5).Info("Failure on delete event, not a secret") + ctrl.LoggerFrom(ctx).V(5).Error(errors.New("not a secret"), "Failure on delete 
event") + return } if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on delete event", "secret", klog.KObj(event.Object)) @@ -333,7 +337,8 @@ func (s *secretHandler) Delete(ctx context.Context, event event.DeleteEvent, q w func (s *secretHandler) Generic(ctx context.Context, event event.GenericEvent, q workqueue.RateLimitingInterface) { secret, isSecret := event.Object.(*corev1.Secret) if !isSecret { - ctrl.LoggerFrom(ctx).V(5).Info("Failure on generic event, not a secret") + ctrl.LoggerFrom(ctx).V(5).Error(errors.New("not a secret"), "Failure on generic event") + return } if err := s.queue(ctx, secret, q); err != nil { ctrl.LoggerFrom(ctx).V(5).Error(err, "Failure on generic event", "secret", klog.KObj(event.Object))