From 37bec882e1fa56b590356af659248765a20027e7 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Sun, 2 Jan 2022 10:10:38 +0200 Subject: [PATCH 01/27] ClusterPool Inventory --- apis/hive/v1/clusterdeployment_types.go | 6 + .../clusterdeploymentcustomization_types.go | 96 +++++ apis/hive/v1/clusterpool_types.go | 30 ++ apis/hive/v1/zz_generated.deepcopy.go | 170 ++++++++ cmd/hiveadmission/main.go | 1 + ...ft.io_clusterdeploymentcustomizations.yaml | 148 +++++++ .../hive.openshift.io_clusterdeployments.yaml | 10 + .../crds/hive.openshift.io_clusterpools.yaml | 29 ++ hack/app-sre/kustomization.yaml | 1 + hack/app-sre/saas-template.yaml | 190 +++++++++ .../hive/v1/clusterdeploymentcustomization.go | 162 ++++++++ .../fake_clusterdeploymentcustomization.go | 114 ++++++ .../typed/hive/v1/fake/fake_hive_client.go | 4 + .../typed/hive/v1/generated_expansion.go | 2 + .../versioned/typed/hive/v1/hive_client.go | 5 + .../informers/externalversions/generic.go | 2 + .../hive/v1/clusterdeploymentcustomization.go | 74 ++++ .../externalversions/hive/v1/interface.go | 7 + .../hive/v1/clusterdeploymentcustomization.go | 83 ++++ .../listers/hive/v1/expansion_generated.go | 8 + pkg/clusterresource/openstack.go | 22 +- .../clusterdeployment_controller.go | 81 +++- .../clusterpool/clusterpool_controller.go | 377 +++++++++++++++++- .../clusterpool_controller_test.go | 108 ++++- pkg/controller/clusterpool/collections.go | 13 + .../hibernation/hibernation_controller.go | 14 +- .../hibernation_controller_test.go | 24 ++ pkg/controller/utils/clusterdeployment.go | 6 + pkg/controller/utils/conditions.go | 53 +++ .../clusterdeployment/clusterdeployment.go | 7 + .../clusterdeploymentcustomization.go | 73 ++++ pkg/test/clusterpool/clusterpool.go | 6 + ...customization_validating_admission_hook.go | 285 +++++++++++++ .../clusterpool_validating_admission_hook.go | 16 + .../apis/hive/v1/clusterdeployment_types.go | 6 + .../clusterdeploymentcustomization_types.go | 96 +++++ .../hive/apis/hive/v1/clusterpool_types.go | 30 ++ .../apis/hive/v1/zz_generated.deepcopy.go | 170 ++++++++ 38 files changed, 2516 insertions(+), 13 deletions(-) create mode 100644 apis/hive/v1/clusterdeploymentcustomization_types.go create mode 100644 config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml create mode 100644 pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go create mode 100644 pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go create mode 100644 pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go create mode 100644 pkg/client/listers/hive/v1/clusterdeploymentcustomization.go create mode 100644 pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go create mode 100644 pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go create mode 100644 vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go index 997af11f194..a8df72decb5 100644 --- a/apis/hive/v1/clusterdeployment_types.go +++ b/apis/hive/v1/clusterdeployment_types.go @@ -52,6 +52,10 @@ const ( // FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster // secret before cleaning up the API object. 
 	FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster"
+
+	// FinalizerCustomizationRelease is used on ClusterDeployments to ensure we release the claimed
+	// ClusterDeploymentCustomization before cleaning up the API object.
+	FinalizerCustomizationRelease string = "hive.openshift.io/customization"
 )
 
 // ClusterPowerState is used to indicate whether a cluster is running or in a
@@ -276,6 +280,8 @@ type ClusterPoolReference struct {
 	// ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for
 	// ClusterDeployments belonging to ClusterPools.
 	ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"`
+	// ClusterDeploymentCustomizationRef references the ClusterDeploymentCustomization claimed from the ClusterPool Inventory for this ClusterDeployment
+	ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization"`
 }
 
 // ClusterMetadata contains metadata information about the installed cluster.
diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go
new file mode 100644
index 00000000000..cf738bc9a59
--- /dev/null
+++ b/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -0,0 +1,96 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterDeploymentCustomization is the Schema for the clusterdeploymentcustomizations API
+// +kubebuilder:subresource:status
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Namespaced
+type ClusterDeploymentCustomization struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ClusterDeploymentCustomizationSpec   `json:"spec"`
+	Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"`
+}
+
+// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization
+type ClusterDeploymentCustomizationSpec struct {
+	// TODO: documentation
+	InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"`
+}
+
+// TODO: documentation
+type PatchEntity struct {
+	// +required
+	Op string `json:"op"`
+	// +required
+	Path string `json:"path"`
+	// +required
+	Value string `json:"value"`
+}
+
+// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization
+type ClusterDeploymentCustomizationStatus struct {
+	// TODO: documentation
+	// +optional
+	ClusterDeploymentRef *corev1.ObjectReference `json:"clusterDeploymentRef,omitempty"`
+
+	// +optional
+	LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"`
+
+	// +optional
+	LastApplyStatus string `json:"lastApplyStatus,omitempty"`
+
+	// Conditions includes more detailed status for the cluster deployment customization status.
+	// +optional
+	Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"`
+}
+
+type ClusterDeploymentCustomizationCondition struct {
+	// Type is the type of the condition.
+	Type ClusterDeploymentCustomizationConditionType `json:"type"`
+	// Status is the status of the condition.
+	Status corev1.ConditionStatus `json:"status"`
+	// LastProbeTime is the last time we probed the condition.
+	// +optional
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// LastTransitionTime is the last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// Message is a human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type
+type ClusterDeploymentCustomizationConditionType string
+
+const (
+	// TODO: add more types
+	// TODO: shorter name?
+	ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterDeploymentCustomizationList contains the list of ClusterDeploymentCustomization
+type ClusterDeploymentCustomizationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	Items           []ClusterDeploymentCustomization `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{})
+}
diff --git a/apis/hive/v1/clusterpool_types.go b/apis/hive/v1/clusterpool_types.go
index 71e394a212c..c1bafa09a0f 100644
--- a/apis/hive/v1/clusterpool_types.go
+++ b/apis/hive/v1/clusterpool_types.go
@@ -92,6 +92,18 @@ type ClusterPoolSpec struct {
 	// HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool.
 	// +optional
 	HibernationConfig *HibernationConfig `json:"hibernationConfig"`
+
+	// Inventory maintains a list of entries consumed by the clusterpool
+	// to customize the default cluster deployment
+	// +optional
+	Inventory []InventoryEntry `json:"inventory,omitempty"`
+
+	// InventoryAttempts is the number of attempts to provision a ClusterDeployment with a given inventory entry.
+	// On a successful provision, the inventory entry's attempts status is updated to this value.
+	// Negative InventoryAttempts means unlimited attempts, and is recommended only for debugging purposes.
+	// The default number of InventoryAttempts is 5.
+	// +optional
+	InventoryAttempts *int32 `json:"inventoryAttempts,omitempty"`
 }
 
 type HibernationConfig struct {
@@ -110,6 +122,22 @@ type HibernationConfig struct {
 	ResumeTimeout metav1.Duration `json:"resumeTimeout"`
 }
 
+// InventoryEntryKind is the Kind of the inventory entry
+// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization
+type InventoryEntryKind string
+
+const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization"
+
+// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment
+type InventoryEntry struct {
+	// Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value.
+	// +optional
+	Kind InventoryEntryKind `json:"kind,omitempty"`
+	// Name is the name of the referenced resource.
+	// +required
+	Name string `json:"name,omitempty"`
+}
+
 // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool.
 type ClusterPoolClaimLifetime struct {
 	// Default is the default lifetime of the claim when no lifetime is set on the claim itself.
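
Before the generated files below, a minimal sketch (in Go, not part of the patch) of how the new surface composes. The names, pool size, and patch path/value are illustrative, and the sketch assumes the op/path/value triples are applied JSON-patch-style to the generated install-config:

package main

import (
	"fmt"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	attempts := int32(5) // the documented default

	// A pool that draws per-cluster customizations from its inventory.
	pool := hivev1.ClusterPool{
		ObjectMeta: metav1.ObjectMeta{Name: "openstack-pool", Namespace: "hive"},
		Spec: hivev1.ClusterPoolSpec{
			Size: 2,
			Inventory: []hivev1.InventoryEntry{
				// Kind may be omitted; ClusterDeploymentCustomization is the
				// default and currently the only supported value.
				{Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry, Name: "customization-0"},
				{Name: "customization-1"},
			},
			InventoryAttempts: &attempts,
		},
	}

	// One inventory entry: a named set of install-config patches.
	cdc := hivev1.ClusterDeploymentCustomization{
		ObjectMeta: metav1.ObjectMeta{Name: "customization-0", Namespace: "hive"},
		Spec: hivev1.ClusterDeploymentCustomizationSpec{
			InstallConfigPatches: []hivev1.PatchEntity{
				{Op: "replace", Path: "/platform/openstack/cloud", Value: "cloud-0"},
			},
		},
	}

	fmt.Printf("pool %s references customization %s\n", pool.Name, cdc.Name)
}

The admission hook and clusterpool controller changes later in this patch validate and consume these objects.
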
@@ -197,6 +225,8 @@ const ( // ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready) // ClusterDeployments in the pool match the current configuration of the ClusterPool. ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent" + // ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid + ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" ) // +genclient diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go index 2f96e248910..a33a463adbb 100644 --- a/apis/hive/v1/zz_generated.deepcopy.go +++ b/apis/hive/v1/zz_generated.deepcopy.go @@ -676,6 +676,134 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. +func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. +func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. +func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. +func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1341,6 +1469,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.ClusterDeploymentCustomizationRef != nil { + in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1413,6 +1546,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(HibernationConfig) **out = **in } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2500,6 +2638,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. +func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2989,6 +3143,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. +func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in diff --git a/cmd/hiveadmission/main.go b/cmd/hiveadmission/main.go index 3c8c243f23f..45fe777814c 100644 --- a/cmd/hiveadmission/main.go +++ b/cmd/hiveadmission/main.go @@ -30,6 +30,7 @@ func main() { hivevalidatingwebhooks.NewMachinePoolValidatingAdmissionHook(decoder), hivevalidatingwebhooks.NewSyncSetValidatingAdmissionHook(decoder), hivevalidatingwebhooks.NewSelectorSyncSetValidatingAdmissionHook(decoder), + hivevalidatingwebhooks.NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder), ) } diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml new file mode 100644 index 00000000000..536bf0f8ec4 --- /dev/null +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -0,0 +1,148 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io +spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired state + of ClusterDeploymentCustomization + properties: + installConfigPatches: + description: 'TODO: documentation' + items: + description: 'TODO: documentation' + properties: + op: + type: string + path: + type: string + value: + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization + properties: + clusterDeploymentRef: + description: 'TODO: documentation' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + conditions: + description: Conditions includes more detailed status for the cluster + deployment customization status. + items: + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating + details about last transition. + type: string + reason: + description: Reason is a unique, one-word, CamelCase reason + for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. 
+ type: string + required: + - status + - type + type: object + type: array + lastApplyStatus: + type: string + lastApplyTime: + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index 54445dd4adb..fcee301eb70 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -187,6 +187,15 @@ spec: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: ClusterDeploymentCustomizationRef is the ClusterPool + Inventory claimed customization for this ClusterDeployment + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. @@ -196,6 +205,7 @@ spec: the cluster was created. type: string required: + - clusterDeploymentCustomization - namespace - poolName type: object diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml index afcc377be59..24f1f882d23 100644 --- a/config/crds/hive.openshift.io_clusterpools.yaml +++ b/config/crds/hive.openshift.io_clusterpools.yaml @@ -152,6 +152,35 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + inventory: + description: Inventory maintains a list entries consumed by the clusterpool + to customize the default the cluster deployment + items: + description: InventoryEntry maintains a reference to a custom resource + consumed by a clusterpool to customize the cluster deployment + properties: + kind: + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is also + currently the only supported value. + enum: + - "" + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. + type: string + type: object + type: array + inventoryAttempts: + description: InventoryAttempts is the number of attempts to provision + a ClusterDeployment with a given inventory entry. On a successful + provision, the inventory entry attempts status is updated to this + value. Negative InventoryAttempts means unlimited attempts, and + recommended only for debugging purposes. Default number of InventoryAttempts + is 5. 
+ format: int32 + type: integer labels: additionalProperties: type: string diff --git a/hack/app-sre/kustomization.yaml b/hack/app-sre/kustomization.yaml index 5941b588da1..9c3ff4d672f 100644 --- a/hack/app-sre/kustomization.yaml +++ b/hack/app-sre/kustomization.yaml @@ -29,6 +29,7 @@ resources: - ../../config/crds/hive.openshift.io_selectorsyncsets.yaml - ../../config/crds/hive.openshift.io_syncidentityproviders.yaml - ../../config/crds/hive.openshift.io_syncsets.yaml +- ../../config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization # Use app-sre-supplied variables to pull the image for the current commit diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index b914587293c..081d629f19a 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -251,6 +251,156 @@ objects: plural: '' conditions: [] storedVersions: [] +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io + spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired + state of ClusterDeploymentCustomization + properties: + installConfigPatches: + description: 'TODO: documentation' + items: + description: 'TODO: documentation' + properties: + op: + type: string + path: + type: string + value: + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization + properties: + clusterDeploymentRef: + description: 'TODO: documentation' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). 
This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + conditions: + description: Conditions includes more detailed status for the cluster + deployment customization status. + items: + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the + condition. + format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating + details about last transition. + type: string + reason: + description: Reason is a unique, one-word, CamelCase reason + for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + lastApplyStatus: + type: string + lastApplyTime: + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -442,6 +592,15 @@ objects: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: ClusterDeploymentCustomizationRef is the ClusterPool + Inventory claimed customization for this ClusterDeployment + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. @@ -451,6 +610,7 @@ objects: the cluster was created. type: string required: + - clusterDeploymentCustomization - namespace - poolName type: object @@ -1967,6 +2127,36 @@ objects: TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string type: object + inventory: + description: Inventory maintains a list entries consumed by the + clusterpool to customize the default the cluster deployment + items: + description: InventoryEntry maintains a reference to a custom + resource consumed by a clusterpool to customize the cluster + deployment + properties: + kind: + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is + also currently the only supported value. + enum: + - '' + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. + type: string + type: object + type: array + inventoryAttempts: + description: InventoryAttempts is the number of attempts to provision + a ClusterDeployment with a given inventory entry. On a successful + provision, the inventory entry attempts status is updated to this + value. Negative InventoryAttempts means unlimited attempts, and + recommended only for debugging purposes. Default number of InventoryAttempts + is 5. + format: int32 + type: integer labels: additionalProperties: type: string diff --git a/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..dd7ce990a81 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/hive/apis/hive/v1" + scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterDeploymentCustomizationsGetter has a method to return a ClusterDeploymentCustomizationInterface. +// A group's client should implement this interface. +type ClusterDeploymentCustomizationsGetter interface { + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface +} + +// ClusterDeploymentCustomizationInterface has methods to work with ClusterDeploymentCustomization resources. 
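+//
+// A minimal usage sketch, assuming a *rest.Config cfg and a context.Context ctx
+// (the namespace and name are illustrative):
+//
+//	cs, err := versioned.NewForConfig(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	cdc, err := cs.HiveV1().ClusterDeploymentCustomizations("hive").Get(ctx, "my-customization", metav1.GetOptions{})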
+type ClusterDeploymentCustomizationInterface interface { + Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (*v1.ClusterDeploymentCustomization, error) + Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (*v1.ClusterDeploymentCustomization, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterDeploymentCustomization, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterDeploymentCustomizationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) + ClusterDeploymentCustomizationExpansion +} + +// clusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type clusterDeploymentCustomizations struct { + client rest.Interface + ns string +} + +// newClusterDeploymentCustomizations returns a ClusterDeploymentCustomizations +func newClusterDeploymentCustomizations(c *HiveV1Client, namespace string) *clusterDeploymentCustomizations { + return &clusterDeploymentCustomizations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. +func (c *clusterDeploymentCustomizations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *clusterDeploymentCustomizations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterDeploymentCustomizationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterDeploymentCustomizationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *clusterDeploymentCustomizations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. 
+func (c *clusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Post(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *clusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(clusterDeploymentCustomization.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *clusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. +func (c *clusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go new file mode 100644 index 00000000000..e775114dae9 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go @@ -0,0 +1,114 @@ +// Code generated by client-gen. DO NOT EDIT. 
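+//
+// In unit tests, the generated fake clientset (pkg/client/clientset/versioned/fake)
+// exercises the implementation below; a minimal sketch, assuming a context.Context
+// ctx (the object name and namespace are illustrative):
+//
+//	cs := fake.NewSimpleClientset(&hivev1.ClusterDeploymentCustomization{
+//		ObjectMeta: metav1.ObjectMeta{Namespace: "hive", Name: "cdc-1"},
+//	})
+//	list, err := cs.HiveV1().ClusterDeploymentCustomizations("hive").List(ctx, metav1.ListOptions{})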
+ +package fake + +import ( + "context" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type FakeClusterDeploymentCustomizations struct { + Fake *FakeHiveV1 + ns string +} + +var clusterdeploymentcustomizationsResource = schema.GroupVersionResource{Group: "hive.openshift.io", Version: "v1", Resource: "clusterdeploymentcustomizations"} + +var clusterdeploymentcustomizationsKind = schema.GroupVersionKind{Group: "hive.openshift.io", Version: "v1", Kind: "ClusterDeploymentCustomization"} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. +func (c *FakeClusterDeploymentCustomizations) Get(ctx context.Context, name string, options v1.GetOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(clusterdeploymentcustomizationsResource, c.ns, name), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *FakeClusterDeploymentCustomizations) List(ctx context.Context, opts v1.ListOptions) (result *hivev1.ClusterDeploymentCustomizationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(clusterdeploymentcustomizationsResource, clusterdeploymentcustomizationsKind, c.ns, opts), &hivev1.ClusterDeploymentCustomizationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &hivev1.ClusterDeploymentCustomizationList{ListMeta: obj.(*hivev1.ClusterDeploymentCustomizationList).ListMeta} + for _, item := range obj.(*hivev1.ClusterDeploymentCustomizationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *FakeClusterDeploymentCustomizations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(clusterdeploymentcustomizationsResource, c.ns, opts)) + +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *FakeClusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.CreateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. 
Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *FakeClusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.UpdateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *FakeClusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(clusterdeploymentcustomizationsResource, c.ns, name, opts), &hivev1.ClusterDeploymentCustomization{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(clusterdeploymentcustomizationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &hivev1.ClusterDeploymentCustomizationList{}) + return err +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. +func (c *FakeClusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(clusterdeploymentcustomizationsResource, c.ns, name, pt, data, subresources...), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go index d52a518c108..06f12520fe7 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go @@ -24,6 +24,10 @@ func (c *FakeHiveV1) ClusterDeployments(namespace string) v1.ClusterDeploymentIn return &FakeClusterDeployments{c, namespace} } +func (c *FakeHiveV1) ClusterDeploymentCustomizations(namespace string) v1.ClusterDeploymentCustomizationInterface { + return &FakeClusterDeploymentCustomizations{c, namespace} +} + func (c *FakeHiveV1) ClusterDeprovisions(namespace string) v1.ClusterDeprovisionInterface { return &FakeClusterDeprovisions{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go index 600401a271d..951ab87652b 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go @@ -8,6 +8,8 @@ type ClusterClaimExpansion interface{} type ClusterDeploymentExpansion interface{} +type ClusterDeploymentCustomizationExpansion interface{} + type ClusterDeprovisionExpansion interface{} type ClusterImageSetExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go 
b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go index 0652d984747..e0ea9fe9f1a 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go @@ -15,6 +15,7 @@ type HiveV1Interface interface { CheckpointsGetter ClusterClaimsGetter ClusterDeploymentsGetter + ClusterDeploymentCustomizationsGetter ClusterDeprovisionsGetter ClusterImageSetsGetter ClusterPoolsGetter @@ -48,6 +49,10 @@ func (c *HiveV1Client) ClusterDeployments(namespace string) ClusterDeploymentInt return newClusterDeployments(c, namespace) } +func (c *HiveV1Client) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface { + return newClusterDeploymentCustomizations(c, namespace) +} + func (c *HiveV1Client) ClusterDeprovisions(namespace string) ClusterDeprovisionInterface { return newClusterDeprovisions(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 71f7068525e..296f8270454 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -44,6 +44,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterClaims().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeployments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeployments().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clusterdeploymentcustomizations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeploymentCustomizations().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeprovisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeprovisions().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterimagesets"): diff --git a/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..637b3b707f7 --- /dev/null +++ b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + versioned "github.com/openshift/hive/pkg/client/clientset/versioned" + internalinterfaces "github.com/openshift/hive/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/hive/pkg/client/listers/hive/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationInformer provides access to a shared informer and lister for +// ClusterDeploymentCustomizations. +type ClusterDeploymentCustomizationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterDeploymentCustomizationLister +} + +type clusterDeploymentCustomizationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).Watch(context.TODO(), options) + }, + }, + &hivev1.ClusterDeploymentCustomization{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterDeploymentCustomizationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterDeploymentCustomizationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&hivev1.ClusterDeploymentCustomization{}, f.defaultInformer) +} + +func (f *clusterDeploymentCustomizationInformer) Lister() v1.ClusterDeploymentCustomizationLister { + return v1.NewClusterDeploymentCustomizationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/hive/v1/interface.go b/pkg/client/informers/externalversions/hive/v1/interface.go index d73a5da9ab5..0659201eae6 100644 --- a/pkg/client/informers/externalversions/hive/v1/interface.go +++ b/pkg/client/informers/externalversions/hive/v1/interface.go @@ -14,6 +14,8 @@ type Interface interface { ClusterClaims() ClusterClaimInformer // ClusterDeployments returns a ClusterDeploymentInformer. ClusterDeployments() ClusterDeploymentInformer + // ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. + ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer // ClusterDeprovisions returns a ClusterDeprovisionInformer. ClusterDeprovisions() ClusterDeprovisionInformer // ClusterImageSets returns a ClusterImageSetInformer. @@ -70,6 +72,11 @@ func (v *version) ClusterDeployments() ClusterDeploymentInformer { return &clusterDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. 
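+//
+// A shared-informer sketch, assuming a versioned clientset cs and a stop channel
+// stopCh of type <-chan struct{} (the resync period, namespace, and name are
+// illustrative; start the factory and wait for cache sync before using the lister):
+//
+//	factory := externalversions.NewSharedInformerFactory(cs, 10*time.Minute)
+//	lister := factory.Hive().V1().ClusterDeploymentCustomizations().Lister()
+//	factory.Start(stopCh)
+//	factory.WaitForCacheSync(stopCh)
+//	cdc, err := lister.ClusterDeploymentCustomizations("hive").Get("my-customization")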
+func (v *version) ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer { + return &clusterDeploymentCustomizationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ClusterDeprovisions returns a ClusterDeprovisionInformer. func (v *version) ClusterDeprovisions() ClusterDeprovisionInformer { return &clusterDeprovisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..dc1ff4923ad --- /dev/null +++ b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/hive/apis/hive/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationLister helps list ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. +type ClusterDeploymentCustomizationLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister + ClusterDeploymentCustomizationListerExpansion +} + +// clusterDeploymentCustomizationLister implements the ClusterDeploymentCustomizationLister interface. +type clusterDeploymentCustomizationLister struct { + indexer cache.Indexer +} + +// NewClusterDeploymentCustomizationLister returns a new ClusterDeploymentCustomizationLister. +func NewClusterDeploymentCustomizationLister(indexer cache.Indexer) ClusterDeploymentCustomizationLister { + return &clusterDeploymentCustomizationLister{indexer: indexer} +} + +// List lists all ClusterDeploymentCustomizations in the indexer. +func (s *clusterDeploymentCustomizationLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. +func (s *clusterDeploymentCustomizationLister) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister { + return clusterDeploymentCustomizationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ClusterDeploymentCustomizationNamespaceLister helps list and get ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. +type ClusterDeploymentCustomizationNamespaceLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.ClusterDeploymentCustomization, error) + ClusterDeploymentCustomizationNamespaceListerExpansion +} + +// clusterDeploymentCustomizationNamespaceLister implements the ClusterDeploymentCustomizationNamespaceLister +// interface. +type clusterDeploymentCustomizationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. +func (s clusterDeploymentCustomizationNamespaceLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. +func (s clusterDeploymentCustomizationNamespaceLister) Get(name string) (*v1.ClusterDeploymentCustomization, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterdeploymentcustomization"), name) + } + return obj.(*v1.ClusterDeploymentCustomization), nil +} diff --git a/pkg/client/listers/hive/v1/expansion_generated.go b/pkg/client/listers/hive/v1/expansion_generated.go index 2f913b5fa55..b07f9b98377 100644 --- a/pkg/client/listers/hive/v1/expansion_generated.go +++ b/pkg/client/listers/hive/v1/expansion_generated.go @@ -26,6 +26,14 @@ type ClusterDeploymentListerExpansion interface{} // ClusterDeploymentNamespaceLister. type ClusterDeploymentNamespaceListerExpansion interface{} +// ClusterDeploymentCustomizationListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationLister. +type ClusterDeploymentCustomizationListerExpansion interface{} + +// ClusterDeploymentCustomizationNamespaceListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationNamespaceLister. +type ClusterDeploymentCustomizationNamespaceListerExpansion interface{} + // ClusterDeprovisionListerExpansion allows custom methods to be added to // ClusterDeprovisionLister. type ClusterDeprovisionListerExpansion interface{} diff --git a/pkg/clusterresource/openstack.go b/pkg/clusterresource/openstack.go index ee6be5fa338..123cbd2a17f 100644 --- a/pkg/clusterresource/openstack.go +++ b/pkg/clusterresource/openstack.go @@ -15,6 +15,12 @@ import ( "github.com/openshift/hive/pkg/constants" ) +const ( + computeFlavor = "m1.large" + masterFlavor = "ci.m4.xlarge" + externalNetwork = "provider_net_shared_3" +) + var _ CloudBuilder = (*OpenStackCloudBuilder)(nil) // OpenStackCloudBuilder encapsulates cluster artifact generation logic specific to OpenStack. 
@@ -43,6 +49,13 @@ type OpenStackCloudBuilder struct {
 	MasterFlavor string
 }
 
+func NewOpenStackCloudBuilderFromSecret(credsSecret *corev1.Secret) *OpenStackCloudBuilder {
+	cloudsYamlContent := credsSecret.Data[constants.OpenStackCredentialsName]
+	return &OpenStackCloudBuilder{
+		CloudsYAMLContent: cloudsYamlContent,
+	}
+}
+
 func (p *OpenStackCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret {
 	return &corev1.Secret{
 		TypeMeta: metav1.TypeMeta{
@@ -80,19 +93,19 @@ func (p *OpenStackCloudBuilder) addMachinePoolPlatform(o *Builder, mp *hivev1.Ma
 func (p *OpenStackCloudBuilder) addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig) {
 	ic.Platform = installertypes.Platform{
 		OpenStack: &installeropenstack.Platform{
-			Cloud:                p.Cloud,
-			ExternalNetwork:      p.ExternalNetwork,
-			DeprecatedFlavorName: p.ComputeFlavor,
+			Cloud:                p.Cloud,
+			ExternalNetwork:      externalNetwork,
+			DeprecatedFlavorName: computeFlavor,
 			APIFloatingIP:        p.APIFloatingIP,
 			IngressFloatingIP:    p.IngressFloatingIP,
 		},
 	}
 
 	ic.Compute[0].Platform.OpenStack = &installeropenstack.MachinePool{
-		FlavorName: p.ComputeFlavor,
+		FlavorName: computeFlavor,
 	}
 
 	ic.ControlPlane.Platform.OpenStack = &installeropenstack.MachinePool{
-		FlavorName: p.MasterFlavor,
+		FlavorName: masterFlavor,
 	}
 }
 
diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
index 30c53149841..5e1dbb0c5cb 100644
--- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go
+++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
@@ -564,13 +564,21 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
 	if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
 		cdLog.Debugf("adding clusterdeployment finalizer")
 		if err := r.addClusterDeploymentFinalizer(cd); err != nil {
-			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer")
+			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding deprovision finalizer")
 			return reconcile.Result{}, err
 		}
 		metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
 		return reconcile.Result{}, nil
 	}
 
+	if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) {
+		cdLog.Debugf("adding customization finalizer")
+		if err := r.addClusterDeploymentCustomizationFinalizer(cd); err != nil {
+			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding customization finalizer")
+			return reconcile.Result{}, err
+		}
+	}
+
 	if cd.Spec.ManageDNS {
 		updated, result, err := r.ensureManagedDNSZone(cd, cdLog)
 		if updated || err != nil {
@@ -1184,6 +1192,19 @@ func (r *ReconcileClusterDeployment) setClusterStatusURLs(cd *hivev1.ClusterDepl
 	return reconcile.Result{}, nil
 }
 
+func (r *ReconcileClusterDeployment) ensureClusterDeploymentCustomizationIsReleased(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (gone bool, returnErr error) {
+	if cd.Spec.ClusterPoolRef == nil || cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef == nil {
+		return true, nil
+	}
+
+	if err := r.releaseClusterDeploymentCustomization(cd, cdLog); err != nil {
+		cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error releasing inventory customization")
+		return false, err
+	}
+
+	return true, nil
+}
+
 // ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone
 // linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted.
// Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it
@@ -1383,13 +1404,23 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu
 		return reconcile.Result{}, err
 	}
 
+	customizationReleased := false
+	if deprovisioned {
+		customizationReleased, err = r.ensureClusterDeploymentCustomizationIsReleased(cd, cdLog)
+		if err != nil {
+			return reconcile.Result{}, err
+		}
+	}
+
 	switch {
 	case !deprovisioned:
 		return reconcile.Result{}, nil
 	case !dnsZoneGone:
 		return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
+	case !customizationReleased:
+		return reconcile.Result{}, nil
 	default:
-		cdLog.Infof("DNSZone gone and deprovision request completed, removing finalizer")
+		cdLog.Infof("DNSZone gone, customization released and deprovision request completed, removing finalizer")
 		if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil {
 			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
 			return reconcile.Result{}, err
@@ -1404,6 +1435,12 @@ func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.Cl
 	return r.Update(context.TODO(), cd)
 }
 
+func (r *ReconcileClusterDeployment) addClusterDeploymentCustomizationFinalizer(cd *hivev1.ClusterDeployment) error {
+	cd = cd.DeepCopy()
+	controllerutils.AddFinalizer(cd, hivev1.FinalizerCustomizationRelease)
+	return r.Update(context.TODO(), cd)
+}
+
 func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
 	cd = cd.DeepCopy()
 
@@ -1422,6 +1459,46 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1
 	return nil
 }
 
+func (r *ReconcileClusterDeployment) releaseClusterDeploymentCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
+	customizationRef := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef
+	cdc := &hivev1.ClusterDeploymentCustomization{}
+	err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: cd.Spec.ClusterPoolRef.Namespace, Name: customizationRef.Name}, cdc)
+	switch {
+	case apierrors.IsNotFound(err):
+		// Nothing to release, but the finalizer below still has to come off.
+		cdLog.Infof("customization not found: %s/%s, nothing to release", cd.Spec.ClusterPoolRef.Namespace, customizationRef.Name)
+	case err != nil:
+		cdLog.WithError(err).Error("error reading customization")
+		return err
+	default:
+		conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
+			cdc.Status.Conditions,
+			hivev1.ClusterDeploymentCustomizationAvailableCondition,
+			corev1.ConditionTrue,
+			"ClusterDeploymentCustomizationAvailable",
+			"Cluster Deployment Customization is available",
+			controllerutils.UpdateConditionIfReasonOrMessageChange,
+		)
+		if changed {
+			cdc.Status.Conditions = conds
+			cdc.Status.ClusterDeploymentRef = nil
+			if err := r.Status().Update(context.Background(), cdc); err != nil {
+				cdLog.WithError(err).Errorf("failed to update ClusterDeploymentCustomization %s condition", customizationRef.Name)
+				return err
+			}
+		}
+	}
+
+	controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease)
+	if err := r.Update(context.TODO(), cd); err != nil {
+		cdLog.WithError(err).Error("failed to update ClusterDeployment after ClusterDeploymentCustomization finalizer deletion")
+		return err
+	}
+
+	return nil
+}
+
 // setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation
 // to when the dnszone became ready, and set a metric to report the delay.
// Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered. diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index e52374ed1e0..159ff5f6e52 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -2,16 +2,19 @@ package clusterpool import ( "context" + "encoding/json" "fmt" "math" "reflect" "sort" + "github.com/ghodss/yaml" "github.com/pkg/errors" log "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "github.com/davegardnerisme/deephash" + jsonpatch "github.com/evanphx/json-patch" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,6 +49,7 @@ const ( icSecretDependent = "install config template secret" cdClusterPoolIndex = "spec.clusterpool.namespacedname" claimClusterPoolIndex = "spec.clusterpoolname" + defaultInventoryAttempts = 5 ) var ( @@ -54,6 +58,7 @@ var ( hivev1.ClusterPoolMissingDependenciesCondition, hivev1.ClusterPoolCapacityAvailableCondition, hivev1.ClusterPoolAllClustersCurrentCondition, + hivev1.ClusterPoolInventoryValidCondition, } ) @@ -289,6 +294,8 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. if err != nil { return reconcile.Result{}, err } + r.updateInventory(clp, cds.Unassigned(false), true, "", logger) + r.updateInventory(clp, cds.Installing(), true, "", logger) claims, err := getAllClaimsForPool(r.Client, clp, logger) if err != nil { @@ -373,6 +380,7 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. // consume our maxConcurrent with additions than deletions. But we put it before the // "deleteExcessClusters" case because we would rather trim broken clusters than viable ones. case len(cds.Broken()) > 0: + r.updateInventory(clp, cds.Broken(), false, "cloud broken", logger) if err := r.deleteBrokenClusters(cds, availableCurrent, logger); err != nil { return reconcile.Result{}, err } @@ -415,6 +423,106 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. 
 	return reconcile.Result{}, nil
 }
 
+// updateInventory synchronizes the pool's ClusterDeploymentCustomization statuses and the pool's
+// InventoryValid condition against the given set of ClusterDeployments.
+func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*hivev1.ClusterDeployment, valid bool, state string, logger log.FieldLogger) {
+	if clp.Spec.Inventory == nil {
+		return
+	}
+	var activeCDC []string
+	for _, cd := range cds {
+		cdcRef := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef
+		if cdcRef == nil {
+			// Clusters created before the inventory was added carry no customization.
+			continue
+		}
+		activeCDC = append(activeCDC, cdcRef.Name)
+		cdc := &hivev1.ClusterDeploymentCustomization{}
+		if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: cdcRef.Name}, cdc); err != nil {
+			if apierrors.IsNotFound(err) {
+				r.setInventoryValidCondition(clp, true, cdcRef.Name, "missing", logger)
+			}
+			logger.WithError(err).Warn("error reading customization")
+			continue
+		}
+		if cdc.Status.ClusterDeploymentRef == nil {
+			cdc.Status.ClusterDeploymentRef = &corev1.ObjectReference{Name: cd.Name, Namespace: cd.Namespace}
+		}
+		cdc.Status.LastApplyTime = metav1.Now()
+		if valid {
+			cdc.Status.LastApplyStatus = "success"
+		} else {
+			cdc.Status.LastApplyStatus = "failed"
+		}
+		conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
+			cdc.Status.Conditions,
+			hivev1.ClusterDeploymentCustomizationAvailableCondition,
+			corev1.ConditionFalse,
+			"Reservation",
+			"Reserving cluster deployment customization",
+			controllerutils.UpdateConditionIfReasonOrMessageChange,
+		)
+		if changed {
+			cdc.Status.Conditions = conds
+		}
+
+		if err := r.Status().Update(context.Background(), cdc); err != nil {
+			if apierrors.IsNotFound(err) {
+				r.setInventoryValidCondition(clp, true, cdcRef.Name, "missing", logger)
+			}
+			logger.WithError(err).Warn("failed to update customization status")
+		}
+		r.setInventoryValidCondition(clp, !valid, cdcRef.Name, state, logger)
+	}
+	sort.Strings(activeCDC)
+	for _, item := range clp.Spec.Inventory {
+		// Skip customizations attached to one of the clusters handled above.
+		if pos := sort.SearchStrings(activeCDC, item.Name); pos < len(activeCDC) && activeCDC[pos] == item.Name {
+			continue
+		}
+		cdc := &hivev1.ClusterDeploymentCustomization{}
+		if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: item.Name}, cdc); err != nil {
+			if apierrors.IsNotFound(err) {
+				r.setInventoryValidCondition(clp, true, item.Name, "missing", logger)
+			}
+			continue
+		}
+		r.setInventoryValidCondition(clp, false, item.Name, "missing", logger)
+		currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition(
+			cdc.Status.Conditions,
+			hivev1.ClusterDeploymentCustomizationAvailableCondition,
+		)
+		if cdc.Status.ClusterDeploymentRef != nil {
+			cd := &hivev1.ClusterDeployment{}
+			ref := client.ObjectKey{Namespace: cdc.Status.ClusterDeploymentRef.Namespace, Name: cdc.Status.ClusterDeploymentRef.Name}
+			if err := r.Client.Get(context.Background(), ref, cd); err != nil && apierrors.IsNotFound(err) {
+				cdc.Status.ClusterDeploymentRef = nil
+			}
+		}
+		availableWithCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionTrue) && cdc.Status.ClusterDeploymentRef != nil
+		reservedWithoutCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionFalse) && cdc.Status.ClusterDeploymentRef == nil
+		if availableWithCD || reservedWithoutCD {
+			status := corev1.ConditionTrue
+			reason := "Available"
+			message := "Available"
+			if availableWithCD {
+				status = corev1.ConditionFalse
+				reason = "Reservation"
+				message = "Fixed reservation"
+			}
+			conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
+				cdc.Status.Conditions,
+				hivev1.ClusterDeploymentCustomizationAvailableCondition,
+				status,
+				reason,
+				message,
+				controllerutils.UpdateConditionIfReasonOrMessageChange,
+			)
+			if changed {
+				cdc.Status.Conditions = conds
+			}
+
+			if err := r.Status().Update(context.Background(), cdc); err != nil {
+				logger.WithError(err).Errorf("could not update broken ClusterDeploymentCustomization %s", cdc.Name)
+			}
+		}
+	}
+}
+
 // reconcileRunningClusters ensures the oldest unassigned clusters are set to running, and the
 // remainder are set to hibernating. The number of clusters we set to running is determined by
 // adding the cluster's configured runningCount to the number of unsatisfied claims for which we're
@@ -480,6 +588,12 @@ func calculatePoolVersion(clp *hivev1.ClusterPool) string {
 	ba = append(ba, deephash.Hash(clp.Spec.BaseDomain)...)
 	ba = append(ba, deephash.Hash(clp.Spec.ImageSetRef)...)
 	ba = append(ba, deephash.Hash(clp.Spec.InstallConfigSecretTemplateRef)...)
+	// Inventory changes the behavior of the cluster pool, so it must be reflected in the pool version.
+	// To avoid redeploying clusters whenever the inventory changes, a fixed string (not a hash of the
+	// inventory contents) is added: https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#pool-version
+	if clp.Spec.Inventory != nil {
+		ba = append(ba, []byte("hasInventory")...)
+	}
 	// Hash of hashes to ensure fixed length
 	return fmt.Sprintf("%x", deephash.Hash(ba))
 }
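The fixed-token scheme above is easiest to see as an invariant. A minimal sketch (a hypothetical test, not part of this patch, reusing this package's test helpers) of what it guarantees:

	// Adding an inventory changes the pool version exactly once; editing the
	// inventory afterwards leaves the version alone, so existing clusters are
	// not treated as stale and redeployed.
	pool := testcp.FullBuilder("ns", "pool", scheme).Build()
	noInventory := calculatePoolVersion(pool)

	pool.Spec.Inventory = []hivev1.InventoryEntry{{Name: "cdc-a"}}
	withInventory := calculatePoolVersion(pool)

	pool.Spec.Inventory = append(pool.Spec.Inventory, hivev1.InventoryEntry{Name: "cdc-b"})
	editedInventory := calculatePoolVersion(pool)

	// Expected: noInventory != withInventory, while withInventory == editedInventory.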
@@ -653,6 +767,14 @@ func (r *ReconcileClusterPool) createCluster(
 	poolVersion string,
 	logger log.FieldLogger,
 ) (*hivev1.ClusterDeployment, error) {
+	cdc := &hivev1.ClusterDeploymentCustomization{}
+	var err error
+	if clp.Spec.Inventory != nil {
+		if cdc, err = r.getInventoryCustomization(clp, logger); err != nil {
+			return nil, err
+		}
+	}
+
 	ns, err := r.createRandomNamespace(clp)
 	if err != nil {
 		logger.WithError(err).Error("error obtaining random namespace")
@@ -696,6 +818,7 @@ func (r *ReconcileClusterPool) createCluster(
 	poolKey := types.NamespacedName{Namespace: clp.Namespace, Name: clp.Name}.String()
 	r.expectations.ExpectCreations(poolKey, 1)
 	var cd *hivev1.ClusterDeployment
+	var ics *corev1.Secret
 	// Add the ClusterPoolRef to the ClusterDeployment, and move it to the end of the slice.
 	for i, obj := range objs {
 		var ok bool
@@ -704,10 +827,59 @@ func (r *ReconcileClusterPool) createCluster(
 			continue
 		}
 		poolRef := poolReference(clp)
+		if clp.Spec.Inventory != nil {
+			poolRef.ClusterDeploymentCustomizationRef = &corev1.LocalObjectReference{Name: cdc.Name}
+		}
 		cd.Spec.ClusterPoolRef = &poolRef
 		lastIndex := len(objs) - 1
 		objs[i], objs[lastIndex] = objs[lastIndex], objs[i]
 	}
+	// Apply inventory customization
+	if clp.Spec.Inventory != nil {
+		for _, obj := range objs {
+			if !isInstallConfigSecret(obj) {
+				continue
+			}
+			ics = obj.(*corev1.Secret)
+			installConfig, err := applyPatches(cdc.Spec.InstallConfigPatches, ics.StringData["install-config.yaml"], logger)
+			if err != nil {
+				// Patching failed: flag the customization in the pool's InventoryValid condition
+				// and mark it Available again so another create can pick it up.
+				r.setInventoryValidCondition(clp, true, cdc.Name, "config broken", logger)
+				cdc.Status.LastApplyStatus = "failed"
+				cdc.Status.LastApplyTime = metav1.Now()
+				conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
+					cdc.Status.Conditions,
+					hivev1.ClusterDeploymentCustomizationAvailableCondition,
+					corev1.ConditionTrue,
+					"Available",
+					"Available",
+					controllerutils.UpdateConditionIfReasonOrMessageChange,
+				)
+				if changed {
+					cdc.Status.Conditions = conds
+				}
+
+				if updateErr := r.Status().Update(context.Background(), cdc); updateErr != nil {
+					if apierrors.IsNotFound(updateErr) {
+						r.setInventoryValidCondition(clp, true, cdc.Name, "missing", logger)
+					}
+					return nil, errors.Wrap(updateErr, "could not update ClusterDeploymentCustomization conditions") // TODO: CDC cleanup process needed
+				}
+
+				return nil, err
+			}
+			cdc.Status.LastApplyStatus = "success"
+			cdc.Status.LastApplyTime = metav1.Now()
+			cdc.Status.ClusterDeploymentRef = &corev1.ObjectReference{Name: cd.Name, Namespace: cd.Namespace}
+			if err := r.Status().Update(context.Background(), cdc); err != nil {
+				logger.WithError(err).Warning("could not update ClusterDeploymentCustomization status")
+			}
+
+			ics.StringData["install-config.yaml"] = installConfig
+		}
+	}
 	// Create the resources.
 	for _, obj := range objs {
 		if err := r.Client.Create(context.Background(), obj.(client.Object)); err != nil {
@@ -914,6 +1086,87 @@ func (r *ReconcileClusterPool) setAvailableCapacityCondition(pool *hivev1.Cluste
 	return nil
 }
 
+// setInventoryValidCondition maintains the ClusterPool's InventoryValid condition. The condition
+// message is a JSON object mapping a failure state ("cloud broken", "config broken" or "missing")
+// to the sorted list of ClusterDeploymentCustomization names currently in that state. add=true
+// records cdcName under state; add=false clears cdcName from every state.
+func (r *ReconcileClusterPool) setInventoryValidCondition(pool *hivev1.ClusterPool, add bool, cdcName string, state string, logger log.FieldLogger) error {
+	currentCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition)
+	states := []string{"cloud broken", "config broken", "missing"}
+	currentMessage := map[string][]string{}
+	for _, s := range states {
+		currentMessage[s] = []string{}
+	}
+	if currentCondition != nil && currentCondition.Message != "" {
+		if err := json.Unmarshal([]byte(currentCondition.Message), &currentMessage); err != nil {
+			logger.WithError(err).Warn("could not parse existing InventoryValid condition message, starting fresh")
+		}
+	}
+
+	// remove deletes name from the sorted slice s, preserving order.
+	remove := func(s []string, name string) []string {
+		sort.Strings(s)
+		pos := sort.SearchStrings(s, name)
+		if pos < len(s) && s[pos] == name {
+			return append(s[:pos], s[pos+1:]...)
+		}
+		return s
+	}
+
+	// A customization can only be in one state at a time, so clear it everywhere first.
+	for _, s := range states {
+		currentMessage[s] = remove(currentMessage[s], cdcName)
+	}
+	if add {
+		currentMessage[state] = append(currentMessage[state], cdcName)
+		sort.Strings(currentMessage[state])
+	}
+
+	status := corev1.ConditionTrue
+	reason := "InventoryValid"
+	if (len(currentMessage["cloud broken"]) + len(currentMessage["config broken"]) + len(currentMessage["missing"])) > 0 {
+		status = corev1.ConditionFalse
+		reason = "Invalid"
+	}
+	messageByte, err := json.Marshal(currentMessage)
+	if err != nil {
+		return errors.Wrap(err, "could not update ClusterPool conditions")
+	}
+
+	conditions, changed := controllerutils.SetClusterPoolConditionWithChangeCheck(
+		pool.Status.Conditions,
+		hivev1.ClusterPoolInventoryValidCondition,
+		status,
+		reason,
+		string(messageByte),
+		controllerutils.UpdateConditionIfReasonOrMessageChange,
+	)
+
+	if changed {
+		pool.Status.Conditions = conditions
+		if err := r.Status().Update(context.TODO(), pool); err != nil {
+			logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterPool conditions")
+			return errors.Wrap(err, "could not update ClusterPool conditions")
+		}
+	}
+	return nil
+}
+
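For illustration, the condition this produces on a pool with one missing and one unpatchable customization would look roughly like this (the customization names are hypothetical):

	status:
	  conditions:
	  - type: InventoryValid
	    status: "False"
	    reason: Invalid
	    message: '{"cloud broken":[],"config broken":["region-b-flavors"],"missing":["region-a-net"]}'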
 func (r *ReconcileClusterPool) verifyClusterImageSet(pool *hivev1.ClusterPool, logger log.FieldLogger) error {
 	err := r.Get(context.Background(), client.ObjectKey{Name: pool.Spec.ImageSetRef.Name}, &hivev1.ClusterImageSet{})
 	if err != nil {
@@ -1006,9 +1259,131 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg
 		cloudBuilder.Region = platform.Azure.Region
 		cloudBuilder.CloudName = platform.Azure.CloudName
 		return cloudBuilder, nil
-	// TODO: OpenStack, VMware, and Ovirt.
+	case platform.OpenStack != nil:
+		credsSecret, err := r.getCredentialsSecret(pool, platform.OpenStack.CredentialsSecretRef.Name, logger)
+		if err != nil {
+			return nil, err
+		}
+		cloudBuilder := clusterresource.NewOpenStackCloudBuilderFromSecret(credsSecret)
+		cloudBuilder.Cloud = platform.OpenStack.Cloud
+		return cloudBuilder, nil
+	// TODO: VMware and Ovirt.
 	default:
 		logger.Info("unsupported platform")
 		return nil, errors.New("unsupported platform")
 	}
 }
+
+// getInventoryCustomization loops over the pool's inventory entries and picks the first available
+// customization. See https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#fairness
+// Failing to apply a customization (in any cluster pool) causes its status to change to unavailable,
+// and a new cluster will be queued.
+func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPool, logger log.FieldLogger) (*hivev1.ClusterDeploymentCustomization, error) {
+	var inventory ClusterDeploymentCustomizations
+	for _, entry := range pool.Spec.Inventory {
+		if entry.Kind == hivev1.ClusterDeploymentCustomizationInventoryEntry || entry.Kind == "" {
+			cdc := &hivev1.ClusterDeploymentCustomization{}
+			if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: pool.Namespace, Name: entry.Name}, cdc); err != nil {
+				if apierrors.IsNotFound(err) {
+					r.setInventoryValidCondition(pool, true, entry.Name, "missing", logger)
+				}
+				continue
+			}
+			currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition(
+				cdc.Status.Conditions,
+				hivev1.ClusterDeploymentCustomizationAvailableCondition,
+			)
+			if currentAvailability == nil || currentAvailability.Status == corev1.ConditionTrue {
+				inventory = append(inventory, *cdc)
+			}
+		}
+	}
+	if inventory.Len() > 0 {
+		sort.Sort(inventory)
+		cdc := &inventory[0]
+		conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
+			cdc.Status.Conditions,
+			hivev1.ClusterDeploymentCustomizationAvailableCondition,
+			corev1.ConditionFalse,
+			"Reservation",
+			"Reserving cluster deployment customization",
+			controllerutils.UpdateConditionIfReasonOrMessageChange,
+		)
+		if changed {
+			cdc.Status.Conditions = conds
+			if err := r.Status().Update(context.Background(), cdc); err != nil {
+				if apierrors.IsNotFound(err) {
+					r.setInventoryValidCondition(pool, true, cdc.Name, "missing", logger)
+				}
+				return nil, errors.Wrap(err, "could not update ClusterDeploymentCustomization conditions")
+			}
+		}
+
+		return cdc, nil
+	}
+
+	return nil, errors.New("no customization available")
+}
+
+// applyPatches marshals the patch entities into an RFC 6902 JSON patch and applies it to the
+// install-config YAML, returning the patched YAML.
+func applyPatches(patches []hivev1.PatchEntity, data string, logger log.FieldLogger) (string, error) {
+	targetJson, err := yaml.YAMLToJSON([]byte(data))
+	if err != nil {
+		logger.WithError(err).Error("unable to parse install-config template")
+		return data, err
+	}
+
+	patchJson, err := json.Marshal(patches)
+	if err != nil {
+		logger.WithError(err).Error("unable to marshal patches to json")
+		return data, err
+	}
+
+	patch, err := jsonpatch.DecodePatch(patchJson)
+	if err != nil {
+		logger.WithError(err).Error("unable to create json patch")
+		return data, err
+	}
+
+	patchedJson, err := patch.Apply(targetJson)
+	if err != nil {
+		logger.WithError(err).Error("unable to patch install-config template")
+		return data, err
+	}
+
+	patchedYaml, err := yaml.JSONToYAML(patchedJson)
+	if err != nil {
+		logger.WithError(err).Error("unable to convert patched install-config back to yaml")
+		return data, err
+	}
+
+	return string(patchedYaml), nil
+}
+
+func isInstallConfigSecret(obj interface{}) bool {
+	if secret, ok := obj.(*corev1.Secret); ok {
+		_, hasIC := secret.StringData["install-config.yaml"]
+		return hasIC
+	}
+	return false
+}
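To make the patching flow concrete: spec.installConfigPatches is marshaled into an RFC 6902 JSON patch and applied to the rendered install-config. A hypothetical customization (all values illustrative; the path assumes the installer's OpenStack machine-pool schema, where the flavor is serialized as "type") could look like:

	apiVersion: hive.openshift.io/v1
	kind: ClusterDeploymentCustomization
	metadata:
	  name: region-a-flavors
	  namespace: my-pools
	spec:
	  installConfigPatches:
	  - op: replace
	    path: /compute/0/platform/openstack/type
	    value: m1.xlarge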
+
+// ClusterDeploymentCustomizations is a list of ClusterDeploymentCustomization objects
+type ClusterDeploymentCustomizations []hivev1.ClusterDeploymentCustomization
+
+// Len is the number of elements in the collection.
+func (c ClusterDeploymentCustomizations) Len() int { return len(c) }
+
+// Less reports whether the element with index i should sort before the element with index j.
+// Customizations whose last apply failed sort first; ties are broken by oldest LastApplyTime.
+func (c ClusterDeploymentCustomizations) Less(i, j int) bool {
+	statusI := c[i].Status.LastApplyStatus
+	statusJ := c[j].Status.LastApplyStatus
+	timeI := c[i].Status.LastApplyTime
+	timeJ := c[j].Status.LastApplyTime
+	if statusI == statusJ {
+		return timeI.Before(&timeJ)
+	}
+	return statusI != "success"
+}
+
+// Swap swaps the elements with indexes i and j.
+func (c ClusterDeploymentCustomizations) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go
index 59ce0524613..4cbd53e3042 100644
--- a/pkg/controller/clusterpool/clusterpool_controller_test.go
+++ b/pkg/controller/clusterpool/clusterpool_controller_test.go
@@ -28,6 +28,7 @@ import (
 	controllerutils "github.com/openshift/hive/pkg/controller/utils"
 	testclaim "github.com/openshift/hive/pkg/test/clusterclaim"
 	testcd "github.com/openshift/hive/pkg/test/clusterdeployment"
+	testcdc "github.com/openshift/hive/pkg/test/clusterdeploymentcustomization"
 	testcp "github.com/openshift/hive/pkg/test/clusterpool"
 	testgeneric "github.com/openshift/hive/pkg/test/generic"
 	testsecret "github.com/openshift/hive/pkg/test/secret"
@@ -38,6 +39,7 @@ const (
 	testLeasePoolName = "aws-us-east-1"
 	credsSecretName   = "aws-creds"
 	imageSetName      = "test-image-set"
+	cdcName           = "test-cdc"
 )
 
 func TestReconcileClusterPool(t *testing.T) {
@@ -72,6 +74,20 @@ func TestReconcileClusterPool(t *testing.T) {
 			Status: corev1.ConditionUnknown,
 			Type:   hivev1.ClusterPoolAllClustersCurrentCondition,
 		}),
+		testcp.WithCondition(hivev1.ClusterPoolCondition{
+			Status: corev1.ConditionUnknown,
+			Type:   hivev1.ClusterPoolInventoryValidCondition,
+		}),
+	)
+
+	inventoryPoolVersion := "17d682718ef4859e"
+	inventoryPoolBuilder := initializedPoolBuilder.Options(
+		testcp.WithInventory([]hivev1.InventoryEntry{
+			{
+				Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry,
+				Name: cdcName,
+			},
+		}),
 	)
 	cdBuilder := func(name string) testcd.Builder {
 		return testcd.FullBuilder(name, name, scheme).Options(
@@ -92,6 +108,8 @@ func TestReconcileClusterPool(t *testing.T) {
 		existing          []runtime.Object
 		noClusterImageSet bool
 		noCredsSecret     bool
+		noCustomization   bool
+		inventory         bool
 		expectError           bool
 		expectedTotalClusters int
 		expectedObservedSize  int32
@@ -101,6 +119,7 @@ func TestReconcileClusterPool(t *testing.T) {
 		expectedMissingDependenciesStatus corev1.ConditionStatus
 		expectedCapacityStatus            corev1.ConditionStatus
 		expectedCDCurrentStatus           corev1.ConditionStatus
+		expectedInventoryValidStatus      corev1.ConditionStatus
 		expectedMissingDependenciesMessage string
 		expectedAssignedClaims             int
 		expectedUnassignedClaims           int
@@ -121,6 +140,7 @@ func TestReconcileClusterPool(t *testing.T) {
 			expectedMissingDependenciesStatus: corev1.ConditionUnknown,
 			expectedCapacityStatus:            corev1.ConditionUnknown,
 			expectedCDCurrentStatus:           corev1.ConditionUnknown,
+			expectedInventoryValidStatus:      corev1.ConditionUnknown,
 		},
 		{
 			name: "copyover fields",
@@ -166,6 +186,72 @@ func TestReconcileClusterPool(t *testing.T) {
 			},
 			expectPoolVersionChanged: true,
 		},
+		{
+			name: "poolVersion changes with new Inventory",
+			existing: []runtime.Object{
+				initializedPoolBuilder.Build(testcp.WithInventory(
+					[]hivev1.InventoryEntry{
+						{
+							Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry,
+							Name: cdcName,
+						},
+					},
+				)),
+			},
+			expectPoolVersionChanged: true,
+		},
+		{
+			name: "poolVersion doesn't change when an existing Inventory changes",
+			existing: []runtime.Object{
+				inventoryPoolBuilder.Build(testcp.WithInventory(
+					[]hivev1.InventoryEntry{
+						{
+							Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry,
+							Name: "test-cdc-2",
+						},
+					},
+				)),
+			},
+			inventory:                true,
+			expectPoolVersionChanged: false,
+		},
+		{
+			name: "poolVersion doesn't change with an unchanged Inventory",
+			existing: []runtime.Object{
+				inventoryPoolBuilder.Build(),
+			},
+			inventory:                true,
+			expectPoolVersionChanged: false,
+		},
+		{
+			name: "customized clusterpool creates a cluster",
+			existing: []runtime.Object{
+				inventoryPoolBuilder.Build(testcp.WithSize(1)),
+			},
+			inventory:                    true,
+			expectedTotalClusters:        1,
+			expectedObservedSize:         0,
+			expectedObservedReady:        0,
+			expectedInventoryValidStatus: corev1.ConditionUnknown,
+		},
+		{
+			name: "customized clusterpool inventory valid",
+			existing: []runtime.Object{
+				inventoryPoolBuilder.Build(testcp.WithSize(1)),
+				testcd.FullBuilder("c1", "c1", scheme).Build(
+					testcd.WithPoolVersion(inventoryPoolVersion),
+					testcd.WithPowerState(hivev1.ClusterPowerStateHibernating),
+					testcd.WithUnclaimedClusterPoolReference(testNamespace, testLeasePoolName),
+					testcd.WithClusterDeploymentCustomizationReference(cdcName),
+					testcd.Running(),
+				),
+			},
+			inventory:                    true,
+			expectedTotalClusters:        1,
+			expectedObservedSize:         1,
+			expectedObservedReady:        1,
+			expectedInventoryValidStatus: corev1.ConditionTrue,
+		},
 		{
 			// This also proves we only delete one stale cluster at a time
 			name: "delete oldest stale cluster first",
@@ -1417,6 +1503,12 @@ func TestReconcileClusterPool(t *testing.T) {
 					Build(testsecret.WithDataKeyValue("dummykey", []byte("dummyval"))),
 			)
 		}
+		if !test.noCustomization {
+			test.existing = append(
+				test.existing,
+				testcdc.FullBuilder(testNamespace, cdcName, scheme).Build(),
+			)
+		}
 		fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(test.existing...).Build()
 		logger := log.New()
 		logger.SetLevel(log.DebugLevel)
@@ -1452,10 +1544,14 @@ func TestReconcileClusterPool(t *testing.T) {
 			assert.Equal(t, test.expectedObservedSize, pool.Status.Size, "unexpected observed size")
 			assert.Equal(t, test.expectedObservedReady, pool.Status.Ready, "unexpected observed ready count")
 			currentPoolVersion := calculatePoolVersion(pool)
+			expectedPoolVersion := initialPoolVersion
+			if test.inventory {
+				expectedPoolVersion = inventoryPoolVersion
+			}
 			assert.Equal(
-				t, test.expectPoolVersionChanged, currentPoolVersion != initialPoolVersion,
+				t, test.expectPoolVersionChanged, currentPoolVersion != expectedPoolVersion,
 				"expectPoolVersionChanged is %t\ninitial %q\nfinal %q",
-				test.expectPoolVersionChanged, initialPoolVersion, currentPoolVersion)
+				test.expectPoolVersionChanged, expectedPoolVersion, currentPoolVersion)
 			expectedCDCurrentStatus := test.expectedCDCurrentStatus
 			if expectedCDCurrentStatus == "" {
 				expectedCDCurrentStatus = corev1.ConditionTrue
@@ -1486,6 +1582,14 @@ func TestReconcileClusterPool(t *testing.T) {
 				}
 			}
 
+			if test.expectedInventoryValidStatus != "" {
+				inventoryValidCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition)
+				if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") {
+					assert.Equal(t, test.expectedInventoryValidStatus, inventoryValidCondition.Status,
+						"unexpected InventoryValid condition status")
+				}
+			}
+
 			cds := &hivev1.ClusterDeploymentList{}
 			err = fakeClient.List(context.Background(), cds)
 			require.NoError(t, err)
diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go
index 64b4da6b297..8751fa21f76 100644
--- a/pkg/controller/clusterpool/collections.go
+++ b/pkg/controller/clusterpool/collections.go
@@ -263,6 +263,20 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log
 		logger.Infof("Cluster %s is broken due to resume timeout", cd.Name)
 		return true
 	}
+	if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil {
+		customizationExists := false
+		cdcName := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef.Name
+		for _, entry := range pool.Spec.Inventory {
+			if cdcName == entry.Name {
+				customizationExists = true
+				break
+			}
+		}
+		if !customizationExists {
+			logger.Infof("Cluster %s is broken due to removed customization %s", cd.Name, cdcName)
+			return true
+		}
+	}
 	return false
 }
diff --git a/pkg/controller/hibernation/hibernation_controller.go b/pkg/controller/hibernation/hibernation_controller.go
index f0882fcbb5e..de8b235ba97 100644
--- a/pkg/controller/hibernation/hibernation_controller.go
+++ b/pkg/controller/hibernation/hibernation_controller.go
@@ -223,6 +223,7 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile
 		changed := r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonUnsupported, msg,
 			corev1.ConditionFalse, cdLog)
 		if changed {
+			cd.Status.PowerState = hivev1.ClusterPowerStateRunning
 			return reconcile.Result{}, r.updateClusterDeploymentStatus(cd, cdLog)
 		}
 	} else if hibernatingCondition.Reason == hivev1.HibernatingReasonUnsupported {
@@ -233,6 +234,7 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile
 	}
 
 	isFakeCluster := controllerutils.IsFakeCluster(cd)
+	isOnpremCustomized := controllerutils.IsOnpremCustomized(cd)
 
 	clusterSync := &hiveintv1alpha1.ClusterSync{}
 	if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}, clusterSync); err != nil {
@@ -371,9 +373,15 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile
 		return r.checkClusterStopped(cd, false, cdLog)
 	}
 	// If we get here, we're not supposed to be hibernating
-	if isFakeCluster {
-		changed := r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonResumingOrRunning,
-			clusterResumingOrRunningMsg, corev1.ConditionFalse, cdLog)
+	if isFakeCluster || isOnpremCustomized {
+		changed := false
+		if supported, msg := r.hibernationSupported(cd); !supported {
+			changed = r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonUnsupported,
+				msg, corev1.ConditionFalse, cdLog)
+		} else {
+			changed = r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonResumingOrRunning,
+				clusterResumingOrRunningMsg, corev1.ConditionFalse, cdLog)
+		}
 		rChanged := r.setCDCondition(cd, hivev1.ClusterReadyCondition, hivev1.ReadyReasonRunning,
 			clusterRunningMsg, corev1.ConditionTrue, cdLog)
 		if changed || rChanged {
diff --git a/pkg/controller/hibernation/hibernation_controller_test.go b/pkg/controller/hibernation/hibernation_controller_test.go
index 4bc77eb7677..be0fe207a5c 100644
--- a/pkg/controller/hibernation/hibernation_controller_test.go
+++ b/pkg/controller/hibernation/hibernation_controller_test.go
@@ -26,6 +26,7 @@ import (
 	machineapi "github.com/openshift/api/machine/v1beta1"
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
+	hivev1openstack "github.com/openshift/hive/apis/hive/v1/openstack"
 	hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
 	"github.com/openshift/hive/pkg/constants"
 	"github.com/openshift/hive/pkg/controller/hibernation/mock"
@@ -292,6 +293,22 @@ func TestReconcile(t *testing.T) {
 				assert.Equal(t, hivev1.ReadyReasonStoppingOrHibernating, runCond.Reason)
 			},
 		},
+		{
+			name: "customized CD will not hibernate",
+			cd:   cdBuilder.Options(o.customized, o.shouldRun).Build(),
+			// The clustersync controller creates a ClusterSync even when there are no syncsets
+			cs: csBuilder.Build(),
+			validate: func(t *testing.T, cd *hivev1.ClusterDeployment) {
+				cond, runCond := getHibernatingAndRunningConditions(cd)
+				require.NotNil(t, cond)
+				assert.Equal(t, corev1.ConditionFalse, cond.Status)
+				assert.Equal(t, hivev1.HibernatingReasonResumingOrRunning, cond.Reason)
+				assert.Equal(t, hivev1.ClusterPowerStateRunning, cd.Status.PowerState)
+				require.NotNil(t, runCond)
+				assert.Equal(t, corev1.ConditionTrue, runCond.Status)
+				assert.Equal(t, hivev1.ReadyReasonRunning, runCond.Reason)
+			},
+		},
 		{
 			name: "start hibernating, syncsets not applied",
 			cd:   cdBuilder.Options(o.shouldHibernate, testcd.InstalledTimestamp(time.Now())).Build(),
@@ -1263,6 +1280,13 @@ func readyCondition(status corev1.ConditionStatus, reason string, lastTransition
 
 type clusterDeploymentOptions struct{}
 
+func (*clusterDeploymentOptions) customized(cd *hivev1.ClusterDeployment) {
+	cd.Spec.Platform.OpenStack = &hivev1openstack.Platform{}
+	cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{
+		ClusterDeploymentCustomizationRef: &corev1.LocalObjectReference{Name: "cdc"},
+	}
+}
+
 func (*clusterDeploymentOptions) notInstalled(cd *hivev1.ClusterDeployment) {
 	cd.Spec.Installed = false
 }
diff --git a/pkg/controller/utils/clusterdeployment.go b/pkg/controller/utils/clusterdeployment.go
index 6f1e7d43a65..f0590435e51 100644
--- a/pkg/controller/utils/clusterdeployment.go
+++ b/pkg/controller/utils/clusterdeployment.go
@@ -24,6 +24,14 @@ func IsFakeCluster(cd *hivev1.ClusterDeployment) bool {
 	return fakeCluster && err == nil
 }
 
+// IsOnpremCustomized returns true for pool clusters that carry an inventory customization on an
+// on-prem (OpenStack) platform; such clusters do not support hibernation.
+func IsOnpremCustomized(cd *hivev1.ClusterDeployment) bool {
+	customized := cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil
+	onprem := cd.Spec.Platform.OpenStack != nil
+	return customized && onprem
+}
+
 // IsClusterPausedOrRelocating checks if the syncing to the cluster is paused or if the cluster is relocating
 func IsClusterPausedOrRelocating(cd *hivev1.ClusterDeployment, logger log.FieldLogger) bool {
 	if paused, err := strconv.ParseBool(cd.Annotations[constants.SyncsetPauseAnnotation]); err == nil && paused {
diff --git a/pkg/controller/utils/conditions.go b/pkg/controller/utils/conditions.go
index 7d49b4ebb09..405d734442a 100644
--- a/pkg/controller/utils/conditions.go
+++ b/pkg/controller/utils/conditions.go
@@ -542,6 +542,59 @@ func SetMachinePoolCondition(
 	return newConditions
 }
 
+// SetClusterDeploymentCustomizationCondition sets a condition on a ClusterDeploymentCustomization resource's status
+func SetClusterDeploymentCustomizationCondition(
+	conditions []hivev1.ClusterDeploymentCustomizationCondition,
+	conditionType hivev1.ClusterDeploymentCustomizationConditionType,
+	status corev1.ConditionStatus,
+	reason string,
+	message string,
+	updateConditionCheck UpdateConditionCheck,
+) ([]hivev1.ClusterDeploymentCustomizationCondition, bool) {
+	now := metav1.Now()
+	changed := false
+	existingCondition := FindClusterDeploymentCustomizationCondition(conditions, conditionType)
+	if existingCondition == nil {
+		changed = true
+		conditions = append(
+			conditions,
+			hivev1.ClusterDeploymentCustomizationCondition{
+				Type:               conditionType,
+				Status:             status,
+				Reason:             reason,
+				Message:            message,
+				LastTransitionTime: now,
+				LastProbeTime:      now,
+			},
+		)
+	} else {
+		if shouldUpdateCondition(
+			existingCondition.Status, existingCondition.Reason, existingCondition.Message,
+			status, reason, message,
+			updateConditionCheck,
+		) {
+			if existingCondition.Status != status {
+				existingCondition.LastTransitionTime = now
+			}
+			existingCondition.Status = status
+			existingCondition.Reason = reason
+			existingCondition.Message = message
+			existingCondition.LastProbeTime = now
+			changed = true
+		}
+	}
+	return conditions, changed
+}
+
+// FindClusterDeploymentCustomizationCondition finds the condition with the given type in the list, or nil if absent.
+func FindClusterDeploymentCustomizationCondition(conditions []hivev1.ClusterDeploymentCustomizationCondition, conditionType hivev1.ClusterDeploymentCustomizationConditionType) *hivev1.ClusterDeploymentCustomizationCondition {
+	for i, condition := range conditions {
+		if condition.Type == conditionType {
+			return &conditions[i]
+		}
+	}
+	return nil
+}
+
 // SetMachinePoolConditionWithChangeCheck sets a condition on a MachinePool resource's status.
 // It returns the conditions as well a boolean indicating whether there was a change made
 // to the conditions.
diff --git a/pkg/test/clusterdeployment/clusterdeployment.go b/pkg/test/clusterdeployment/clusterdeployment.go
index fc270e59730..e2bddcb3dca 100644
--- a/pkg/test/clusterdeployment/clusterdeployment.go
+++ b/pkg/test/clusterdeployment/clusterdeployment.go
@@ -243,3 +243,10 @@ func WithClusterMetadata(clusterMetadata *hivev1.ClusterMetadata) Option {
 		clusterDeployment.Spec.ClusterMetadata = clusterMetadata
 	}
 }
+
+// WithClusterDeploymentCustomizationReference sets the customization reference on the ClusterPoolRef.
+func WithClusterDeploymentCustomizationReference(cdcName string) Option {
+	return func(clusterDeployment *hivev1.ClusterDeployment) {
+		clusterDeployment.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef = &v1.LocalObjectReference{Name: cdcName}
+	}
+}
diff --git a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go
new file mode 100644
index 00000000000..db2f28dd60b
--- /dev/null
+++ b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go
@@ -0,0 +1,73 @@
+package clusterdeploymentcustomization
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+
+	hivev1 "github.com/openshift/hive/apis/hive/v1"
+
+	"github.com/openshift/hive/pkg/test/generic"
+)
+
+// Option defines a function signature for any function that wants to be passed into Build
+type Option func(*hivev1.ClusterDeploymentCustomization)
+
+// Build runs each of the functions passed in to generate the object.
+func Build(opts ...Option) *hivev1.ClusterDeploymentCustomization {
+	retval := &hivev1.ClusterDeploymentCustomization{}
+	for _, o := range opts {
+		o(retval)
+	}
+
+	return retval
+}
+
+type Builder interface {
+	Build(opts ...Option) *hivev1.ClusterDeploymentCustomization
+
+	Options(opts ...Option) Builder
+
+	GenericOptions(opts ...generic.Option) Builder
+}
+
+func BasicBuilder() Builder {
+	return &builder{}
+}
+
+func FullBuilder(namespace, name string, typer runtime.ObjectTyper) Builder {
+	b := &builder{}
+	return b.GenericOptions(
+		generic.WithTypeMeta(typer),
+		generic.WithResourceVersion("1"),
+		generic.WithNamespace(namespace),
+		generic.WithName(name),
+	)
+}
+
+type builder struct {
+	options []Option
+}
+
+func (b *builder) Build(opts ...Option) *hivev1.ClusterDeploymentCustomization {
+	return Build(append(b.options, opts...)...)
+}
+
+func (b *builder) Options(opts ...Option) Builder {
+	return &builder{
+		options: append(b.options, opts...),
+	}
+}
+
+func (b *builder) GenericOptions(opts ...generic.Option) Builder {
+	options := make([]Option, len(opts))
+	for i, o := range opts {
+		options[i] = Generic(o)
+	}
+	return b.Options(options...)
+}
+
+// Generic allows common functions applicable to all objects to be used as Options to Build
+func Generic(opt generic.Option) Option {
+	return func(cdc *hivev1.ClusterDeploymentCustomization) {
+		opt(cdc)
+	}
+}
diff --git a/pkg/test/clusterpool/clusterpool.go b/pkg/test/clusterpool/clusterpool.go
index 8d156a4ce55..cf3d3559276 100644
--- a/pkg/test/clusterpool/clusterpool.go
+++ b/pkg/test/clusterpool/clusterpool.go
@@ -184,3 +184,10 @@ func WithRunningCount(size int) Option {
 		clusterPool.Spec.RunningCount = int32(size)
 	}
 }
+
+// WithInventory sets the given inventory entries on the ClusterPool spec.
+func WithInventory(inventory []hivev1.InventoryEntry) Option {
+	return func(clusterPool *hivev1.ClusterPool) {
+		clusterPool.Spec.Inventory = inventory
+	}
+}
diff --git a/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go
new file mode 100644
index 00000000000..bfb7069bd54
--- /dev/null
+++ b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go
@@ -0,0 +1,285 @@
+package v1
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	log "github.com/sirupsen/logrus"
+
+	admissionv1beta1 "k8s.io/api/admission/v1beta1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	hivev1 "github.com/openshift/hive/apis/hive/v1"
+)
+
+const (
+	clusterDeploymentCustomizationGroup    = "hive.openshift.io"
+	clusterDeploymentCustomizationVersion  = "v1"
+	clusterDeploymentCustomizationResource = "clusterdeploymentcustomizations"
+
+	clusterDeploymentCustomizationAdmissionGroup   = "admission.hive.openshift.io"
+	clusterDeploymentCustomizationAdmissionVersion = "v1"
+)
+
+// ClusterDeploymentCustomizationValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server.
+type ClusterDeploymentCustomizationValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewClusterDeploymentCustomizationValidatingAdmissionHook constructs a new ClusterDeploymentCustomizationValidatingAdmissionHook +func NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder *admission.Decoder) *ClusterDeploymentCustomizationValidatingAdmissionHook { + return &ClusterDeploymentCustomizationValidatingAdmissionHook{ + decoder: decoder, + } +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. +// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterdeploymentcustomizationvalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": clusterDeploymentCustomizationAdmissionGroup, + "version": clusterDeploymentCustomizationAdmissionVersion, + "resource": "clusterdeploymentcustomizationvalidator", + }).Info("Registering validation REST resource") + + // NOTE: This GVR is meant to be different than the ClusterDeploymentCustomization CRD GVR which has group "hive.openshift.io". + return schema.GroupVersionResource{ + Group: clusterDeploymentCustomizationAdmissionGroup, + Version: clusterDeploymentCustomizationAdmissionVersion, + Resource: "clusterdeploymentcustomizationvalidators", + }, + "clusterdeploymentcustomizationvalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": clusterDeploymentCustomizationAdmissionGroup, + "version": clusterDeploymentCustomizationAdmissionVersion, + "resource": "clusterdeploymentcustomizationvalidator", + }).Info("Initializing validation REST resource") + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(admissionSpec) { + contextLogger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's Allowed. 
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: true,
+		}
+	}
+
+	contextLogger.Info("Validating request")
+
+	switch admissionSpec.Operation {
+	case admissionv1beta1.Create:
+		return a.validateCreate(admissionSpec)
+	case admissionv1beta1.Update:
+		return a.validateUpdate(admissionSpec)
+	default:
+		contextLogger.Info("Successful validation")
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: true,
+		}
+	}
+}
+
+// shouldValidate explicitly checks if the request should be validated. For example, this webhook may have accidentally been registered to check
+// the validity of some other type of object with a different GVR.
+func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "shouldValidate",
+	})
+
+	if admissionSpec.Resource.Group != clusterDeploymentCustomizationGroup {
+		contextLogger.Info("Returning False, not our group")
+		return false
+	}
+
+	if admissionSpec.Resource.Version != clusterDeploymentCustomizationVersion {
+		contextLogger.Info("Returning False, it's our group, but not the right version")
+		return false
+	}
+
+	if admissionSpec.Resource.Resource != clusterDeploymentCustomizationResource {
+		contextLogger.Info("Returning False, it's our group and version, but not the right resource")
+		return false
+	}
+
+	// If we get here, then we're supposed to validate the object.
+	contextLogger.Debug("Returning True, passed all prerequisites.")
+	return true
+}
+
+// validateCreate specifically validates create operations for ClusterDeploymentCustomization objects.
+func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "validateCreate",
+	})
+
+	cdc := &hivev1.ClusterDeploymentCustomization{}
+	if err := a.decoder.DecodeRaw(admissionSpec.Object, cdc); err != nil {
+		contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error())
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result: &metav1.Status{
+				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+				Message: err.Error(),
+			},
+		}
+	}
+
+	// Add the new data to the contextLogger
+	contextLogger.Data["object.Name"] = cdc.Name
+
+	// TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd)
+
+	if len(cdc.Name) > validation.DNS1123LabelMaxLength {
+		message := fmt.Sprintf("Invalid cluster deployment customization name (.metadata.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength))
+		contextLogger.Error(message)
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result: &metav1.Status{
+				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+				Message: message,
+			},
+		}
+	}
+
+	allErrs := field.ErrorList{}
+	specPath := field.NewPath("spec")
+
+	allErrs = append(allErrs, validateInstallConfigPatches(specPath.Child("installConfigPatches"), cdc.Spec.InstallConfigPatches)...)
+
+	if len(allErrs) > 0 {
+		status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status()
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result:  &status,
+		}
+	}
+
+	// If we get here, then all checks passed, so the object is valid.
+	contextLogger.Info("Successful validation")
+	return &admissionv1beta1.AdmissionResponse{
+		Allowed: true,
+	}
+}
+
+// validateUpdate specifically validates update operations for ClusterDeploymentCustomization objects.
+func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+	contextLogger := log.WithFields(log.Fields{
+		"operation": admissionSpec.Operation,
+		"group":     admissionSpec.Resource.Group,
+		"version":   admissionSpec.Resource.Version,
+		"resource":  admissionSpec.Resource.Resource,
+		"method":    "validateUpdate",
+	})
+
+	newObject := &hivev1.ClusterDeploymentCustomization{}
+	if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil {
+		contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error())
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result: &metav1.Status{
+				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+				Message: err.Error(),
+			},
+		}
+	}
+
+	// Add the new data to the contextLogger
+	contextLogger.Data["object.Name"] = newObject.Name
+
+	oldObject := &hivev1.ClusterDeploymentCustomization{}
+	if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil {
+		contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error())
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result: &metav1.Status{
+				Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest,
+				Message: err.Error(),
+			},
+		}
+	}
+
+	// Add the old data to the contextLogger
+	contextLogger.Data["oldObject.Name"] = oldObject.Name
+
+	allErrs := field.ErrorList{}
+	specPath := field.NewPath("spec")
+
+	allErrs = append(allErrs, validateInstallConfigPatches(specPath.Child("installConfigPatches"), newObject.Spec.InstallConfigPatches)...)
+
+	if len(allErrs) > 0 {
+		contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation")
+		status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status()
+		return &admissionv1beta1.AdmissionResponse{
+			Allowed: false,
+			Result:  &status,
+		}
+	}
+
+	// If we get here, then all checks passed, so the object is valid.
+ contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func validateInstallConfigPatches(path *field.Path, patches []hivev1.PatchEntity) field.ErrorList { + allErrs := field.ErrorList{} + + for i, patch := range patches { + if !isValidOP(patch.Op) { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch op must be a valid json patch operation")) + } + if len(patch.Path) == 0 || !strings.HasPrefix(patch.Path, "/") { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch path must start with '/'")) + } + } + return allErrs +} + +func isValidOP(op string) bool { + switch op { + case + "replace", + "add", + "remove", + "test", + "copy", + "move": + return true + } + return false +} diff --git a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go index 1302c8dbad0..47d706fee15 100644 --- a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go @@ -179,6 +179,10 @@ func (a *ClusterPoolValidatingAdmissionHook) validateCreate(admissionSpec *admis allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + if newObject.Spec.Inventory != nil { + allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...) + } + if len(allErrs) > 0 { status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() return &admissionv1beta1.AdmissionResponse{ @@ -239,6 +243,10 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + if newObject.Spec.Inventory != nil { + allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...) + } + if len(allErrs) > 0 { contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() @@ -254,3 +262,11 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis Allowed: true, } } + +func validateInventory(path *field.Path, inventory []hivev1.InventoryEntry) field.ErrorList { + allErrs := field.ErrorList{} + if len(inventory) == 0 { + allErrs = append(allErrs, field.Invalid(path, inventory, "inventory can't be empty")) + } + return allErrs +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go index 997af11f194..a8df72decb5 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go @@ -52,6 +52,10 @@ const ( // FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster // secret before cleaning up the API object. 
FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster" + + // FinalizerCustomizationRelease is used on ClusterDeployments to ensure we run a successful + // release of ClusterDeploymentCustomization + FinalizerCustomizationRelease string = "hive.openshift.io/customization" ) // ClusterPowerState is used to indicate whether a cluster is running or in a @@ -276,6 +280,8 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` + // ClusterDeploymentCustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment + ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization"` } // ClusterMetadata contains metadata information about the installed cluster. diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go new file mode 100644 index 00000000000..cf738bc9a59 --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -0,0 +1,96 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API +// +kubebuilder:subresource:status +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Namespaced +type ClusterDeploymentCustomization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterDeploymentCustomizationSpec `json:"spec"` + Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"` +} + +// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationSpec struct { + // TODO: documentation + InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` +} + +// TODO: documentation +type PatchEntity struct { + // +required + Op string `json:"op"` + // +required + Path string `json:"path"` + // +required + Value string `json:"value"` +} + +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationStatus struct { + // TODO: documentation + // +optional + ClusterDeploymentRef *corev1.ObjectReference `json:"clusterDeploymentRef,omitempty"` + + // +optional + LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"` + + // +optional + LastApplyStatus string `json:"lastApplyStatus,omitempty"` + + // Conditions includes more detailed status for the cluster deployment customization status. + // +optional + Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"` +} + +type ClusterDeploymentCustomizationCondition struct { + // Type is the type of the condition. + Type ClusterDeploymentCustomizationConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. 
+ // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type +type ClusterDeploymentCustomizationConditionType string + +const ( + // TODO: add more types + // TODO: shorter name? + ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomizationLis contains the list of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ClusterDeploymentCustomization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{}) +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go index 71e394a212c..c1bafa09a0f 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go @@ -92,6 +92,18 @@ type ClusterPoolSpec struct { // HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool. // +optional HibernationConfig *HibernationConfig `json:"hibernationConfig"` + + // Inventory maintains a list entries consumed by the clusterpool + // to customize the default the cluster deployment + // +optional + Inventory []InventoryEntry `json:"inventory,omitempty"` + + // InventoryAttempts is the number of attempts to provision a ClusterDeployment with a given inventory entry. + // On a successful provision, the inventory entry attempts status is updated to this value. + // Negative InventoryAttempts means unlimited attempts, and recommended only for debugging purposes. + // Default number of InventoryAttempts is 5. + // +optional + InventoryAttempts *int32 `json:"inventoryAttempts,omitempty"` } type HibernationConfig struct { @@ -110,6 +122,22 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } +// InventoryEntryKind in Kind of the inventory entry +// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization +type InventoryEntryKind string + +const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization" + +// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment +type InventoryEntry struct { + // Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value. + // +optional + Kind InventoryEntryKind `json:"kind,omitempty"` + // Name is the name of the referenced resource. + // +required + Name string `json:"name,omitempty"` +} + // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool. type ClusterPoolClaimLifetime struct { // Default is the default lifetime of the claim when no lifetime is set on the claim itself. 
@@ -197,6 +225,8 @@ const ( // ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready) // ClusterDeployments in the pool match the current configuration of the ClusterPool. ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent" + // ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid + ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" ) // +genclient diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index 2f96e248910..a33a463adbb 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -676,6 +676,134 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. +func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. +func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. 
+func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. +func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1341,6 +1469,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.ClusterDeploymentCustomizationRef != nil { + in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1413,6 +1546,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(HibernationConfig) **out = **in } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2500,6 +2638,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. 
+func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2989,6 +3143,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. +func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in From b5c84d6edd0e0c00ab1087c0315391b815eb992f Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Wed, 20 Apr 2022 16:35:35 +0300 Subject: [PATCH 02/27] Fixes and improvements based on feedback --- apis/hive/v1/clusterdeployment_types.go | 6 +- .../clusterdeploymentcustomization_types.go | 39 +- apis/hive/v1/clusterpool_types.go | 30 +- apis/hive/v1/zz_generated.deepcopy.go | 6 +- ...ft.io_clusterdeploymentcustomizations.yaml | 49 +- .../hive.openshift.io_clusterdeployments.yaml | 6 +- .../crds/hive.openshift.io_clusterpools.yaml | 14 +- hack/app-sre/saas-template.yaml | 69 +-- pkg/clusterresource/openstack.go | 15 +- .../clusterdeployment_controller.go | 56 +- .../clusterpool/clusterpool_controller.go | 521 +++++++++--------- .../clusterpool_controller_test.go | 10 +- pkg/controller/clusterpool/collections.go | 26 +- .../hibernation/hibernation_controller.go | 20 +- .../hibernation_controller_test.go | 30 +- .../clusterdeployment/clusterdeployment.go | 2 +- .../apis/hive/v1/clusterdeployment_types.go | 6 +- .../clusterdeploymentcustomization_types.go | 39 +- .../hive/apis/hive/v1/clusterpool_types.go | 30 +- .../apis/hive/v1/zz_generated.deepcopy.go | 6 +- 20 files changed, 479 insertions(+), 501 deletions(-) diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go index a8df72decb5..e842e7d639a 100644 --- a/apis/hive/v1/clusterdeployment_types.go +++ b/apis/hive/v1/clusterdeployment_types.go @@ -280,8 +280,10 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` - // ClusterDeploymentCustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment - ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization"` + // CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment + // The Customization exists in the ClusterPool namespace. + // +optional + CustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"` } // ClusterMetadata contains metadata information about the installed cluster. 
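For orientation, the inventory API reworked in this commit is consumed roughly as follows. This is a minimal sketch only: the names, namespace, and the baseDomain patch are illustrative (not taken from these patches), and required ClusterPool fields such as platform, baseDomain, and imageSetRef are omitted.

    apiVersion: hive.openshift.io/v1
    kind: ClusterDeploymentCustomization
    metadata:
      name: cdc-1
      namespace: my-pool-ns
    spec:
      installConfigPatches:
      - op: replace
        path: /baseDomain
        value: custom.example.com
    ---
    apiVersion: hive.openshift.io/v1
    kind: ClusterPool
    metadata:
      name: my-pool
      namespace: my-pool-ns
    spec:
      size: 1
      inventory:
      - kind: ClusterDeploymentCustomization
        name: cdc-1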
diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go
index cf738bc9a59..86917dbf0a8 100644
--- a/apis/hive/v1/clusterdeploymentcustomization_types.go
+++ b/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -5,6 +5,27 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+// LastApplyStatusType indicates the status of the customization on the last
+// applied cluster deployment. This is needed for the inventory sorting process to
+// avoid reusing the same broken customization
+type LastApplyStatusType string
+
+const (
+	// LastApplySucceeded indicates that the customization
+	// worked properly on the last applied cluster deployment
+	LastApplySucceeded LastApplyStatusType = "Succeeded"
+	// LastApplyBrokenSyntax indicates that Hive failed to apply
+	// customization patches on the install-config. More details can be found in
+	// the Valid condition message.
+	LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax"
+	// LastApplyBrokenCloud indicates that cluster deployment provisioning failed
+	// when this customization was used. More details can be found in the Valid condition message.
+	LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud"
+	// LastApplyInstallationPending indicates that the customization patches have
+	// been successfully applied but provisioning is not completed yet.
+	LastApplyInstallationPending LastApplyStatusType = "InstallationPending"
+)
+
 // +genclient
 // +genclient:noStatus
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -23,31 +44,36 @@ type ClusterDeploymentCustomization struct {
 
 // ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization
 type ClusterDeploymentCustomizationSpec struct {
-	// TODO: documentation
+	// InstallConfigPatches is a list of patches to be applied to the install-config
 	InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"`
 }
 
-// TODO: documentation
+// PatchEntity represents a JSON patch (RFC 6902) to be applied to the install-config
 type PatchEntity struct {
+	// Op is the operation to perform: add, remove, replace, move, copy, test
 	// +required
 	Op string `json:"op"`
+	// Path is the JSON path to the value to be modified
 	// +required
 	Path string `json:"path"`
+	// Value is the value to be used in the operation
 	// +required
 	Value string `json:"value"`
 }
 
 // ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization
 type ClusterDeploymentCustomizationStatus struct {
-	// TODO: documentation
+	// ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied to
 	// +optional
-	ClusterDeploymentRef *corev1.ObjectReference `json:"clusterDeploymentRef,omitempty"`
+	ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"`
 
+	// LastApplyTime indicates the time when the customization was applied on a cluster deployment
 	// +optional
 	LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"`
 
+	// LastApplyStatus indicates the customization status in the last applied cluster deployment
 	// +optional
-	LastApplyStatus string `json:"lastApplyStatus,omitempty"`
+	LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"`
 
 	// Conditions includes more detailed status for the cluster deployment customization status.
 	// +optional
@@ -77,9 +103,8 @@ type ClusterDeploymentCustomizationCondition struct {
 type ClusterDeploymentCustomizationConditionType string
 
 const (
-	// TODO: add more types
-	// TODO: shorter name?
 	ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available"
+	ClusterDeploymentCustomizationValid              ClusterDeploymentCustomizationConditionType = "Valid"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/apis/hive/v1/clusterpool_types.go b/apis/hive/v1/clusterpool_types.go
index c1bafa09a0f..c8673f9cc87 100644
--- a/apis/hive/v1/clusterpool_types.go
+++ b/apis/hive/v1/clusterpool_types.go
@@ -93,17 +93,10 @@ type ClusterPoolSpec struct {
 	// +optional
 	HibernationConfig *HibernationConfig `json:"hibernationConfig"`
 
-	// Inventory maintains a list entries consumed by the clusterpool
-	// to customize the default the cluster deployment
+	// Inventory maintains a list of entries consumed by the ClusterPool
+	// to customize the default ClusterDeployment
 	// +optional
 	Inventory []InventoryEntry `json:"inventory,omitempty"`
-
-	// InventoryAttempts is the number of attempts to provision a ClusterDeployment with a given inventory entry.
-	// On a successful provision, the inventory entry attempts status is updated to this value.
-	// Negative InventoryAttempts means unlimited attempts, and recommended only for debugging purposes.
-	// Default number of InventoryAttempts is 5.
-	// +optional
-	InventoryAttempts *int32 `json:"inventoryAttempts,omitempty"`
 }
 
 type HibernationConfig struct {
@@ -131,7 +124,7 @@ const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "Cluster
 // InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment
 type InventoryEntry struct {
 	// Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value.
-	// +optional
+	// +kubebuilder:default=ClusterDeploymentCustomization
 	Kind InventoryEntryKind `json:"kind,omitempty"`
 	// Name is the name of the referenced resource.
 	// +required
@@ -229,6 +222,23 @@ const (
 	ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid"
 )
 
+// Inventory (in)valid reasons
+const (
+	// InventoryReasonValid is used when all ClusterDeploymentCustomizations are
+	// available and, when used, the ClusterDeployments are successfully installed
+	InventoryReasonValid = "Valid"
+	// InventoryReasonMissing is used when one or more ClusterDeploymentCustomizations are missing
+	InventoryReasonMissing = "Missing"
+	// InventoryReasonFound is used to cancel a missing ClusterDeploymentCustomization
+	InventoryReasonFound = "Found"
+	// InventoryReasonBrokenByCloud is used when one or more ClusterDeployment installations failed
+	InventoryReasonBrokenByCloud = "BrokenByCloud"
+	// InventoryReasonBrokenBySyntax is used when patching failed for one or more ClusterDeploymentCustomizations
+	InventoryReasonBrokenBySyntax = "BrokenBySyntax"
+	// InventoryReasonInvalid is used when the inventory is invalid for more than one reason
+	InventoryReasonInvalid = "Invalid"
+)
+
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go
index a33a463adbb..34e04dd115c 100644
--- a/apis/hive/v1/zz_generated.deepcopy.go
+++ b/apis/hive/v1/zz_generated.deepcopy.go
@@ -781,7 +781,7 @@ func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploym
 	*out = *in
 	if in.ClusterDeploymentRef != nil {
 		in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef
-		*out = new(corev1.ObjectReference)
+		*out = new(corev1.LocalObjectReference)
 		**out = **in
 	}
 	if in.Conditions != nil {
@@ -1469,8 +1469,8 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) {
 		in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp
 		*out = (*in).DeepCopy()
 	}
-	if in.ClusterDeploymentCustomizationRef != nil {
-		in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef
+	if in.CustomizationRef != nil {
+		in, out := &in.CustomizationRef, &out.CustomizationRef
 		*out = new(corev1.LocalObjectReference)
 		**out = **in
 	}
diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
index 536bf0f8ec4..2e15ea28f51 100644
--- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
+++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
@@ -37,15 +37,21 @@ spec:
             of ClusterDeploymentCustomization
           properties:
             installConfigPatches:
-              description: 'TODO: documentation'
+              description: InstallConfigPatches is a list of patches to be applied
+                to the install-config
               items:
-                description: 'TODO: documentation'
+                description: PatchEntity represents a JSON patch (RFC 6902) to be
+                  applied to the install-config
                 properties:
                   op:
+                    description: 'Op is the operation to perform: add, remove, replace,
+                      move, copy, test'
                     type: string
                   path:
+                    description: Path is the JSON path to the value to be modified
                    type: string
                  value:
+                    description: Value is the value to be used in the operation
                    type: string
                required:
                - op
@@ -59,39 +65,12 @@ spec:
             state of ClusterDeploymentCustomization
           properties:
             clusterDeploymentRef:
-              description: 'TODO: documentation'
+              description: ClusterDeploymentRef is a reference to the cluster deployment
+                that this customization is applied to
              properties:
-                apiVersion:
-                  description: API version of the referent.
- type: string - fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object conditions: @@ -128,8 +107,12 @@ spec: type: object type: array lastApplyStatus: + description: LastApplyStatus indicates the customization status in + the last applied cluster deployment type: string lastApplyTime: + description: LastApplyTime indicates the time when the customization + was applied on a cluster deployment format: date-time type: string type: object diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index fcee301eb70..9f52d6daf75 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -188,8 +188,9 @@ spec: format: date-time type: string clusterDeploymentCustomization: - description: ClusterDeploymentCustomizationRef is the ClusterPool - Inventory claimed customization for this ClusterDeployment + description: CustomizationRef is the ClusterPool Inventory claimed + customization for this ClusterDeployment The Customization exists + in the ClusterPool namespace. properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -205,7 +206,6 @@ spec: the cluster was created. 
               type: string
           required:
-          - clusterDeploymentCustomization
           - namespace
           - poolName
           type: object
diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml
index 24f1f882d23..27ea0aa59ca 100644
--- a/config/crds/hive.openshift.io_clusterpools.yaml
+++ b/config/crds/hive.openshift.io_clusterpools.yaml
@@ -153,13 +153,14 @@ spec:
                 type: string
             type: object
           inventory:
-            description: Inventory maintains a list entries consumed by the clusterpool
-              to customize the default the cluster deployment
+            description: Inventory maintains a list of entries consumed by the
+              ClusterPool to customize the default ClusterDeployment
             items:
               description: InventoryEntry maintains a reference to a custom resource
                 consumed by a clusterpool to customize the cluster deployment
               properties:
                 kind:
+                  default: ClusterDeploymentCustomization
                   description: Kind denotes the kind of the referenced resource.
                     The default is ClusterDeploymentCustomization, which is also
                     currently the only supported value.
                   type: string
                 name:
                   description: Name is the name of the referenced resource.
                   type: string
               type: object
             type: array
-          inventoryAttempts:
-            description: InventoryAttempts is the number of attempts to provision
-              a ClusterDeployment with a given inventory entry. On a successful
-              provision, the inventory entry attempts status is updated to this
-              value. Negative InventoryAttempts means unlimited attempts, and
-              recommended only for debugging purposes. Default number of InventoryAttempts
-              is 5.
-            format: int32
-            type: integer
           labels:
             additionalProperties:
               type: string
diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml
index 081d629f19a..ba7c4af6ddd 100644
--- a/hack/app-sre/saas-template.yaml
+++ b/hack/app-sre/saas-template.yaml
@@ -291,15 +291,21 @@ objects:
               state of ClusterDeploymentCustomization
             properties:
               installConfigPatches:
-                description: 'TODO: documentation'
+                description: InstallConfigPatches is a list of patches to be applied
+                  to the install-config
                 items:
-                  description: 'TODO: documentation'
+                  description: PatchEntity represents a JSON patch (RFC 6902) to
+                    be applied to the install-config
                   properties:
                     op:
+                      description: 'Op is the operation to perform: add, remove,
+                        replace, move, copy, test'
                       type: string
                     path:
+                      description: Path is the JSON path to the value to be modified
                       type: string
                     value:
+                      description: Value is the value to be used in the operation
                       type: string
                   required:
                   - op
@@ -313,39 +319,12 @@ objects:
               state of ClusterDeploymentCustomization
             properties:
               clusterDeploymentRef:
-                description: 'TODO: documentation'
+                description: ClusterDeploymentRef is a reference to the cluster
+                  deployment that this customization is applied to
                 properties:
-                  apiVersion:
-                    description: API version of the referent.
-                    type: string
-                  fieldPath:
-                    description: 'If referring to a piece of an object instead of
-                      an entire object, this string should contain a valid JSON/Go
-                      field access statement, such as desiredState.manifest.containers[2].
-                      For example, if the object reference is to a container within
-                      a pod, this would take on a value like: "spec.containers{name}"
-                      (where "name" refers to the name of the container that triggered
-                      the event) or if no container name is specified "spec.containers[2]"
-                      (container with index 2 in this pod). This syntax is chosen
-                      only to have some well-defined way of referencing a part of
-                      an object. TODO: this design is not final and this field is
-                      subject to change in the future.'
-                    type: string
-                  kind:
-                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-                    type: string
                   name:
-                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
-                    type: string
-                  namespace:
-                    description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
-                    type: string
-                  resourceVersion:
-                    description: 'Specific resourceVersion to which this reference
-                      is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
-                    type: string
-                  uid:
-                    description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      TODO: Add other useful fields. apiVersion, kind, uid?'
                     type: string
                 type: object
               conditions:
@@ -383,8 +362,12 @@ objects:
                 type: object
               type: array
             lastApplyStatus:
+              description: LastApplyStatus indicates the customization status
+                in the last applied cluster deployment
               type: string
             lastApplyTime:
+              description: LastApplyTime indicates the time when the customization
+                was applied on a cluster deployment
               format: date-time
               type: string
           type: object
@@ -593,8 +576,9 @@ objects:
                       format: date-time
                       type: string
                     clusterDeploymentCustomization:
-                      description: ClusterDeploymentCustomizationRef is the ClusterPool
-                        Inventory claimed customization for this ClusterDeployment
+                      description: CustomizationRef is the ClusterPool Inventory claimed
+                        customization for this ClusterDeployment The Customization
+                        exists in the ClusterPool namespace.
                       properties:
                         name:
                           description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -610,7 +594,6 @@ objects:
                         the cluster was created.
                       type: string
                     required:
-                    - clusterDeploymentCustomization
                     - namespace
                     - poolName
                     type: object
@@ -2128,14 +2111,15 @@ objects:
                 type: string
             type: object
           inventory:
-            description: Inventory maintains a list entries consumed by the
-              clusterpool to customize the default the cluster deployment
+            description: Inventory maintains a list of entries consumed by the
+              ClusterPool to customize the default ClusterDeployment
             items:
              description: InventoryEntry maintains a reference to a custom
                resource consumed by a clusterpool to customize the cluster
                deployment
              properties:
                kind:
+                  default: ClusterDeploymentCustomization
                  description: Kind denotes the kind of the referenced resource.
                    The default is ClusterDeploymentCustomization, which is also
                    currently the only supported value.
                  type: string
                name:
                  description: Name is the name of the referenced resource.
                  type: string
              type: object
            type: array
-          inventoryAttempts:
-            description: InventoryAttempts is the number of attempts to provision
-              a ClusterDeployment with a given inventory entry. On a successful
-              provision, the inventory entry attempts status is updated to this
-              value. Negative InventoryAttempts means unlimited attempts, and
-              recommended only for debugging purposes. Default number of InventoryAttempts is 5.
- format: int32 - type: integer labels: additionalProperties: type: string diff --git a/pkg/clusterresource/openstack.go b/pkg/clusterresource/openstack.go index 123cbd2a17f..f0cbc53d39d 100644 --- a/pkg/clusterresource/openstack.go +++ b/pkg/clusterresource/openstack.go @@ -15,12 +15,6 @@ import ( "github.com/openshift/hive/pkg/constants" ) -const ( - computeFlavor = "m1.large" - masterFlavor = "ci.m4.xlarge" - externalNetwork = "provider_net_shared_3" -) - var _ CloudBuilder = (*OpenStackCloudBuilder)(nil) // OpenStackCloudBuilder encapsulates cluster artifact generation logic specific to OpenStack. @@ -93,18 +87,19 @@ func (p *OpenStackCloudBuilder) addMachinePoolPlatform(o *Builder, mp *hivev1.Ma func (p *OpenStackCloudBuilder) addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig) { ic.Platform = installertypes.Platform{ OpenStack: &installeropenstack.Platform{ - ExternalNetwork: externalNetwork, - DeprecatedFlavorName: computeFlavor, + Cloud: p.Cloud, + ExternalNetwork: p.ExternalNetwork, + DeprecatedFlavorName: p.ComputeFlavor, APIFloatingIP: p.APIFloatingIP, IngressFloatingIP: p.IngressFloatingIP, }, } ic.Compute[0].Platform.OpenStack = &installeropenstack.MachinePool{ - FlavorName: computeFlavor, + FlavorName: p.ComputeFlavor, } ic.ControlPlane.Platform.OpenStack = &installeropenstack.MachinePool{ - FlavorName: masterFlavor, + FlavorName: p.MasterFlavor, } } diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 5e1dbb0c5cb..57aea7ed1d7 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -562,7 +562,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi } if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) { - cdLog.Debugf("adding clusterdeployment finalizer") + cdLog.Debugf("adding clusterdeployment deprovision finalizer") if err := r.addClusterDeploymentFinalizer(cd); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding deprovision finalizer") return reconcile.Result{}, err @@ -571,14 +571,6 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi return reconcile.Result{}, nil } - if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { - cdLog.Debugf("adding customization finalizer") - if err := r.addClusterDeploymentCustomizationFinalizer(cd); err != nil { - cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding customization finalizer") - return reconcile.Result{}, err - } - } - if cd.Spec.ManageDNS { updated, result, err := r.ensureManagedDNSZone(cd, cdLog) if updated || err != nil { @@ -1192,19 +1184,6 @@ func (r *ReconcileClusterDeployment) setClusterStatusURLs(cd *hivev1.ClusterDepl return reconcile.Result{}, nil } -func (r *ReconcileClusterDeployment) ensureClusterDeploymentCustomizationIsReleased(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (gone bool, returnErr error) { - if cd.Spec.ClusterPoolRef == nil || cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef == nil { - return true, nil - } - - if err := r.releaseClusterDeploymentCustomization(cd, cdLog); err != nil { - cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error releasing inventory customization") - return false, err - } - - return true, nil -} - 
// ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone // linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted. // Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it @@ -1404,10 +1383,9 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu return reconcile.Result{}, err } - customizationReleased := false if deprovisioned { - customizationReleased, err = r.ensureClusterDeploymentCustomizationIsReleased(cd, cdLog) - if err != nil { + if err := r.releaseCustomization(cd, cdLog); err != nil { + cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error releasing inventory customization") return reconcile.Result{}, err } } @@ -1417,10 +1395,8 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu return reconcile.Result{}, nil case !dnsZoneGone: return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil - case !customizationReleased: - return reconcile.Result{}, nil default: - cdLog.Infof("DNSZone gone, customization released and deprovision request completed, removing finalizer") + cdLog.Infof("DNSZone gone, customization gone and deprovision request completed, removing finalizer") if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer") return reconcile.Result{}, err @@ -1432,12 +1408,10 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error { cd = cd.DeepCopy() controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision) - return r.Update(context.TODO(), cd) -} + if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { + controllerutils.AddFinalizer(cd, hivev1.FinalizerCustomizationRelease) + } -func (r *ReconcileClusterDeployment) addClusterDeploymentCustomizationFinalizer(cd *hivev1.ClusterDeployment) error { - cd = cd.DeepCopy() - controllerutils.AddFinalizer(cd, hivev1.FinalizerCustomizationRelease) return r.Update(context.TODO(), cd) } @@ -1459,14 +1433,20 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1 return nil } -func (r *ReconcileClusterDeployment) releaseClusterDeploymentCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { - customizationRef := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef +func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { + if cd.Spec.ClusterPoolRef == nil || cd.Spec.ClusterPoolRef.CustomizationRef == nil { + return nil + } + customizationRef := cd.Spec.ClusterPoolRef.CustomizationRef cdc := &hivev1.ClusterDeploymentCustomization{} err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: cd.Spec.ClusterPoolRef.Namespace, Name: customizationRef.Name}, cdc) if err != nil { if apierrors.IsNotFound(err) { - cdLog.Infof("customization not found: %s/%s, nothing to release", cd.Namespace, customizationRef.Name) + cdLog.WithField("Customization", customizationRef.Name).Info("customization not found, nothing to release") controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) + if err := r.Update(context.TODO(), cd); err != nil { + cdLog.WithError(err).WithField("Customization", 
customizationRef.Name).Error("failed to update ClusterDeployment")
+			}
 			return nil
 		}
 		log.WithError(err).Error("error reading customization")
@@ -1485,14 +1465,14 @@ func (r *ReconcileClusterDeployment) releaseClusterDeploymentCustomization(cd *h
 		cdc.Status.Conditions = conds
 		cdc.Status.ClusterDeploymentRef = nil
 		if err := r.Status().Update(context.Background(), cdc); err != nil {
-			cdLog.Infof("Failed to update ClusterDeploymentCustomization %s condition", customizationRef.Name)
+			cdLog.WithError(err).WithField("Customization", customizationRef.Name).Error("failed to update ClusterDeploymentCustomizationAvailable condition")
 			return err
 		}
 	}
 
 	controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease)
 	if err := r.Update(context.TODO(), cd); err != nil {
-		cdLog.Infof("Failed to update ClusterDeployment after ClusterDeploymentCustomization finalizer deletion")
+		cdLog.WithError(err).WithField("Customization", customizationRef.Name).Error("failed to update ClusterDeployment after ClusterDeploymentCustomization finalizer deletion")
 		return err
 	}
 
diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go
index 159ff5f6e52..92bbecf8f99 100644
--- a/pkg/controller/clusterpool/clusterpool_controller.go
+++ b/pkg/controller/clusterpool/clusterpool_controller.go
@@ -294,8 +294,8 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile.
 	if err != nil {
 		return reconcile.Result{}, err
 	}
-	r.updateInventory(clp, cds.Unassigned(false), true, "", logger)
-	r.updateInventory(clp, cds.Installing(), true, "", logger)
+	r.updateInventory(clp, cds.Unassigned(false), hivev1.LastApplySucceeded, logger)
+	r.updateInventory(clp, cds.Installing(), hivev1.LastApplySucceeded, logger)
 
 	claims, err := getAllClaimsForPool(r.Client, clp, logger)
 	if err != nil {
@@ -380,7 +380,7 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile.
 	// consume our maxConcurrent with additions than deletions. But we put it before the
 	// "deleteExcessClusters" case because we would rather trim broken clusters than viable ones.
 	case len(cds.Broken()) > 0:
-		r.updateInventory(clp, cds.Broken(), false, "cloud broken", logger)
+		r.updateInventory(clp, cds.Broken(), hivev1.LastApplyBrokenCloud, logger)
 		if err := r.deleteBrokenClusters(cds, availableCurrent, logger); err != nil {
 			return reconcile.Result{}, err
 		}
@@ -423,104 +423,131 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile.
 	return reconcile.Result{}, nil
 }
 
-func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*hivev1.ClusterDeployment, valid bool, state string, logger log.FieldLogger) {
-	if clp.Spec.Inventory != nil {
-		var active_cdc []string
-		for _, cd := range cds {
-			cdcRef := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef
-			active_cdc = append(active_cdc, cdcRef.Name)
-			cdc := &hivev1.ClusterDeploymentCustomization{}
-			if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: cdcRef.Name}, cdc); err != nil {
-				if apierrors.IsNotFound(err) {
-					r.setInventoryValidCondition(clp, true, cdc.Name, "missing", logger)
-				}
-				log.WithError(err).Warn("error reading customization")
-				continue
+// updateInventory ensures that the inventory of the cluster pool and the related ClusterDeploymentCustomizations are up to date.
+// This includes validating resource existence and updating references when needed.
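+// For example (an illustrative scenario, not part of this change): when a pool
+// ClusterDeployment provisioned from inventory entry "cdc-1" is reported broken,
+// updateInventory records LastApplyStatus=BrokenByCloud on cdc-1 and the pool's
+// InventoryValid condition message gains the entry {"cdc-1":"BrokenByCloud"}.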
+func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*hivev1.ClusterDeployment, status hivev1.LastApplyStatusType, logger log.FieldLogger) {
+	// no need to update Inventory if it doesn't exist
+	if clp.Spec.Inventory == nil {
+		return
+	}
+	// InventoryValid condition and reason
+	reason := hivev1.InventoryReasonValid
+	switch status {
+	case hivev1.LastApplyBrokenCloud:
+		reason = hivev1.InventoryReasonBrokenByCloud
+	case hivev1.LastApplyBrokenSyntax:
+		reason = hivev1.InventoryReasonBrokenBySyntax
+	}
+
+	// Helper functions to get and update ClusterDeploymentCustomization, and update inventory message if missing
+	getCDC := func(name string) *hivev1.ClusterDeploymentCustomization {
+		cdc := &hivev1.ClusterDeploymentCustomization{}
+		if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: name}, cdc); err != nil {
+			if apierrors.IsNotFound(err) {
+				reason := hivev1.InventoryReasonMissing
+				r.updateInventoryValidMessage(clp, name, reason, false, logger)
 			}
+			logger.WithError(err).Warn("error reading customization")
+			return nil
+		}
+		return cdc
+	}
+
+	updateCDC := func(cdc *hivev1.ClusterDeploymentCustomization, status bool, msg string) {
+		if err := r.setCustomizationAvailabilityCondition(cdc, status, msg, logger); err != nil {
+			logger.WithError(err).Warn("failed to update customization status")
+		}
+	}
+
+	// First update the inventory and ClusterDeploymentCustomizations related to changed ClusterDeployments
+	var active_cdc []string
+	for _, cd := range cds {
+		active_cdc = append(active_cdc, cd.Spec.ClusterPoolRef.CustomizationRef.Name)
+		if cdc := getCDC(cd.Spec.ClusterPoolRef.CustomizationRef.Name); cdc != nil {
+			r.updateInventoryValidMessage(clp, cdc.Name, reason, false, logger)
+			// Fix missing ClusterDeployment Reference
 			if cdc.Status.ClusterDeploymentRef == nil {
-				cdc.Status.ClusterDeploymentRef = &corev1.ObjectReference{Name: cd.Name, Namespace: cd.Namespace}
-			}
-			cdc.Status.LastApplyTime = metav1.Now()
-			if valid {
-				cdc.Status.LastApplyStatus = "success"
-			} else {
-				cdc.Status.LastApplyStatus = "failed"
+				cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cd.Name}
 			}
+
+			cdc.Status.LastApplyStatus = status
+
+			updateCDC(cdc, false, "Reserved")
+		}
+	}
+	r.updateInventoryValidMessage(clp, "", "", true, logger) // just the inventory condition is being updated
+	sort.Strings(active_cdc)
+
+	// Next update the rest of the inventory and ClusterDeploymentCustomizations
+	for _, item := range clp.Spec.Inventory {
+		pos := sort.SearchStrings(active_cdc, item.Name)
+		if pos < len(active_cdc) && active_cdc[pos] == item.Name {
+			// already handled above as an active customization
+			continue
+		}
+		if cdc := getCDC(item.Name); cdc != nil {
+			// This will fix the message if CDC was missing before
+			r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonFound, false, logger)
+			// The following part will try to fix the following scenarios:
+			// 1. CDC condition is available but it has a reference to an existing ClusterDeployment
+			// 2.
CDC is reserved but it doesn't have a reference to a ClusterDeployment + currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( cdc.Status.Conditions, hivev1.ClusterDeploymentCustomizationAvailableCondition, - corev1.ConditionFalse, - "Reservation", - "Reserving cluster deployment customization", - controllerutils.UpdateConditionIfReasonOrMessageChange, ) - if changed { - cdc.Status.Conditions = conds - } - - if err := r.Status().Update(context.Background(), cdc); err != nil { - if apierrors.IsNotFound(err) { - r.setInventoryValidCondition(clp, true, cdc.Name, "missing", logger) - } - log.WithError(err).Warn("failed to update customization status") - } - r.setInventoryValidCondition(clp, !valid, cdc.Name, state, logger) - } - sort.Strings(active_cdc) - for _, item := range clp.Spec.Inventory { - if sort.SearchStrings(active_cdc, item.Name) == len(active_cdc) { - cdc := &hivev1.ClusterDeploymentCustomization{} - if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: item.Name}, cdc); err != nil { + if cdc.Status.ClusterDeploymentRef != nil { + cd := &hivev1.ClusterDeployment{} + ref := client.ObjectKey{Namespace: cdc.Status.ClusterDeploymentRef.Name, Name: cdc.Status.ClusterDeploymentRef.Name} + if err := r.Client.Get(context.Background(), ref, cd); err != nil { if apierrors.IsNotFound(err) { - r.setInventoryValidCondition(clp, true, cdc.Name, "missing", logger) + cdc.Status.ClusterDeploymentRef = nil } - continue } - r.setInventoryValidCondition(clp, false, cdc.Name, "missing", logger) - currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( - cdc.Status.Conditions, - hivev1.ClusterDeploymentCustomizationAvailableCondition, - ) - if cdc.Status.ClusterDeploymentRef != nil { - cd := &hivev1.ClusterDeployment{} - ref := client.ObjectKey{Namespace: cdc.Status.ClusterDeploymentRef.Namespace, Name: cdc.Status.ClusterDeploymentRef.Name} - if err := r.Client.Get(context.Background(), ref, cd); err != nil { - if apierrors.IsNotFound(err) { - cdc.Status.ClusterDeploymentRef = nil - } - } + } + availableWithCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionTrue) && cdc.Status.ClusterDeploymentRef != nil + reservedWithoutCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionFalse) && cdc.Status.ClusterDeploymentRef == nil + if availableWithCD || reservedWithoutCD { + status := true + msg := "Available" + if availableWithCD { + status = false + msg = "Fixed reservation" } - availableWithCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionTrue) && cdc.Status.ClusterDeploymentRef != nil - reservedWithoutCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionFalse) && cdc.Status.ClusterDeploymentRef == nil - if availableWithCD || reservedWithoutCD { - status := corev1.ConditionTrue - reason := "available" - message := "Available" - if availableWithCD { - status = corev1.ConditionFalse - reason = "Reservation" - message = "Fixed reservation" - } - conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition( - cdc.Status.Conditions, - hivev1.ClusterDeploymentCustomizationAvailableCondition, - status, - reason, - message, - controllerutils.UpdateConditionIfReasonOrMessageChange, - ) - - if changed { - cdc.Status.Conditions = conds - } - if err := r.Status().Update(context.Background(), cdc); err != nil { - log.Error("could not update broken ClusterDeploymentCustomization: %s", 
cdc.Name) - } - } + updateCDC(cdc, status, msg) } } + } + r.updateInventoryValidMessage(clp, "", "", true, logger) // just the inventory condition is being updated +} + +func (r *ReconcileClusterPool) setCustomizationAvailabilityCondition(cdc *hivev1.ClusterDeploymentCustomization, available bool, message string, logger log.FieldLogger) error { + status := corev1.ConditionTrue + reason := "Available" + + if !available { + status = corev1.ConditionFalse + reason = "Reserved" + } + + conditions, changed := controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + status, + reason, + message, + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + + if changed { + cdc.Status.Conditions = conditions + if err := r.Status().Update(context.TODO(), cdc); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") + return errors.Wrap(err, "could not update ClusterDeploymentCustomization conditions") + } + } + + return nil } // reconcileRunningClusters ensures the oldest unassigned clusters are set to running, and the @@ -552,8 +579,13 @@ func (r *ReconcileClusterPool) reconcileRunningClusters( ) for i := 0; i < len(cdList); i++ { cd := cdList[i] + hibernateCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.ClusterHibernatingCondition) + hibernateUnsupported := false + if hibernateCondition != nil && hibernateCondition.Reason == hivev1.HibernatingReasonUnsupported { + hibernateUnsupported = true + } var desiredPowerState hivev1.ClusterPowerState - if i < runningCount { + if i < runningCount || hibernateUnsupported { desiredPowerState = hivev1.ClusterPowerStateRunning } else { desiredPowerState = hivev1.ClusterPowerStateHibernating @@ -767,13 +799,7 @@ func (r *ReconcileClusterPool) createCluster( poolVersion string, logger log.FieldLogger, ) (*hivev1.ClusterDeployment, error) { - cdc := &hivev1.ClusterDeploymentCustomization{} var err error - if clp.Spec.Inventory != nil { - if cdc, err = r.getInventoryCustomization(clp, logger); err != nil { - return nil, err - } - } ns, err := r.createRandomNamespace(clp) if err != nil { @@ -818,68 +844,29 @@ func (r *ReconcileClusterPool) createCluster( poolKey := types.NamespacedName{Namespace: clp.Namespace, Name: clp.Name}.String() r.expectations.ExpectCreations(poolKey, 1) var cd *hivev1.ClusterDeployment - var ics *corev1.Secret - // Add the ClusterPoolRef to the ClusterDeployment, and move it to the end of the slice. 
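+	// Locate the ClusterDeployment (recording its position) and the install-config
+	// Secret in a single pass, so the inventory customization can be applied to the
+	// secret before any of the objects are created.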
+ var secret *corev1.Secret + var cdPos int for i, obj := range objs { - var ok bool - cd, ok = obj.(*hivev1.ClusterDeployment) - if !ok { - continue - } - poolRef := poolReference(clp) - if clp.Spec.Inventory != nil { - poolRef.ClusterDeploymentCustomizationRef = &corev1.LocalObjectReference{Name: cdc.Name} - } - cd.Spec.ClusterPoolRef = &poolRef - lastIndex := len(objs) - 1 - objs[i], objs[lastIndex] = objs[lastIndex], objs[i] - } - // Apply inventory customization - if clp.Spec.Inventory != nil { - for _, obj := range objs { - if !isInstallConfigSecret(obj) { - continue - } - ics = obj.(*corev1.Secret) - installConfig, err := applyPatches(cdc.Spec.InstallConfigPatches, ics.StringData["install-config.yaml"], logger) - if err != nil { - r.setInventoryValidCondition(clp, true, cdc.Name, "config broken", logger) - cdc.Status.LastApplyStatus = "failed" - cdc.Status.LastApplyTime = metav1.Now() - conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition( - cdc.Status.Conditions, - hivev1.ClusterDeploymentCustomizationAvailableCondition, - corev1.ConditionTrue, - "available", - "Available", - controllerutils.UpdateConditionIfReasonOrMessageChange, - ) - - if changed { - cdc.Status.Conditions = conds - } - - if err := r.Status().Update(context.Background(), cdc); err != nil { - if apierrors.IsNotFound(err) { - r.setInventoryValidCondition(clp, true, cdc.Name, "missing", logger) - } - return nil, errors.New("could not update ClusterDeploymentCustomization conditions") // TODO: CDC cleanup process needed - } - + if cdTmp, ok := obj.(*hivev1.ClusterDeployment); ok { + cd = cdTmp + cdPos = i + poolRef := poolReference(clp) + cd.Spec.ClusterPoolRef = &poolRef + if err := r.getInventoryCustomization(clp, cd, logger); err != nil { return nil, err - } else { - cdc.Status.LastApplyStatus = "success" - cdc.Status.LastApplyTime = metav1.Now() - cdc.Status.ClusterDeploymentRef = &corev1.ObjectReference{Name: cd.Name, Namespace: cd.Namespace} - if err := r.Status().Update(context.Background(), cdc); err != nil { - log.Warning("could not update ClusterDeploymentCustomization status") - } } - - ics.StringData["install-config.yaml"] = installConfig + } else if secretTmp := isInstallConfigSecret(obj); secretTmp != nil { + secret = secretTmp } } + if err := r.patchInstallConfig(clp, cd, secret, logger); err != nil { + return nil, err + } + + // Move the ClusterDeployment to the end of the slice + lastIndex := len(objs) - 1 + objs[cdPos], objs[lastIndex] = objs[lastIndex], objs[cdPos] // Create the resources. 
for _, obj := range objs {
 		if err := r.Client.Create(context.Background(), obj.(client.Object)); err != nil {
@@ -891,6 +878,62 @@ func (r *ReconcileClusterPool) createCluster(
 	return cd, nil
 }
 
+// patchInstallConfig is responsible for applying the ClusterDeploymentCustomization
+// patches to the install-config and for reserving the customization
+func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *hivev1.ClusterDeployment, secret *corev1.Secret, logger log.FieldLogger) error {
+	if clp.Spec.Inventory == nil {
+		return nil
+	}
+	if cd.Spec.ClusterPoolRef.CustomizationRef == nil {
+		return errors.New("missing customization")
+	}
+
+	cdc := &hivev1.ClusterDeploymentCustomization{}
+	if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: cd.Spec.ClusterPoolRef.CustomizationRef.Name}, cdc); err != nil {
+		if apierrors.IsNotFound(err) {
+			r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonMissing, true, logger)
+		}
+		return err
+	}
+
+	installConfig, err := applyPatches(cdc.Spec.InstallConfigPatches, secret.StringData["install-config.yaml"], logger)
+	if err != nil {
+		r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InvenotryReasonBrokenBySyntax, true, logger)
+		cdc.Status.LastApplyStatus = hivev1.LastApplyBrokenSyntax
+		if updateErr := r.Status().Update(context.Background(), cdc); updateErr != nil {
+			if apierrors.IsNotFound(updateErr) {
+				r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonMissing, true, logger)
+			}
+		}
+
+		return err
+	}
+
+	// Reserve the ClusterDeploymentCustomization
+	cdc.Status.LastApplyTime = metav1.Now()
+	conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
+		cdc.Status.Conditions,
+		hivev1.ClusterDeploymentCustomizationAvailableCondition,
+		corev1.ConditionFalse,
+		"reserved",
+		"Reserved",
+		controllerutils.UpdateConditionIfReasonOrMessageChange,
+	)
+
+	if changed {
+		cdc.Status.Conditions = conds
+	}
+
+	if updateErr := r.Status().Update(context.Background(), cdc); updateErr != nil {
+		if apierrors.IsNotFound(updateErr) {
+			r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonMissing, true, logger)
+		}
+		return updateErr
+	}
+
+	secret.StringData["install-config.yaml"] = installConfig
+	return nil
+}
+
 func (r *ReconcileClusterPool) createRandomNamespace(clp *hivev1.ClusterPool) (*corev1.Namespace, error) {
 	namespaceName := apihelpers.GetResourceName(clp.Name, utilrand.String(5))
 	ns := &corev1.Namespace{
@@ -1086,66 +1129,49 @@ func (r *ReconcileClusterPool) setAvailableCapacityCondition(pool *hivev1.Cluste
 	return nil
 }
 
-func (r *ReconcileClusterPool) setInventoryValidCondition(pool *hivev1.ClusterPool, add bool, cdcName string, state string, logger log.FieldLogger) error {
+// updateInventoryValidMessage maintains the cluster pool inventory status. If there is an issue with any of the ClusterDeploymentCustomizations then the inventory is invalid.
+// When the inventory is invalid, the function keeps track of the reason and the related ClusterDeploymentCustomization name in the condition message.
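+// For example, a pool whose inventory has two problem entries might carry a
+// condition message like {"some-cdc":"BrokenBySyntax","other-cdc":"Missing"} (names are illustrative).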
+func (r *ReconcileClusterPool) updateInventoryValidMessage(pool *hivev1.ClusterPool, cdcName string, cdcState string, update bool, logger log.FieldLogger) error {
 	currentCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition)
-	currentMessage := map[string][]string{}
-	emptyMessage := map[string][]string{
-		"cloud broken": {},
-		"config broken": {},
-		"missing": {},
-	}
-	if currentCondition.Message == "" {
-		currentMessage = emptyMessage
-	} else {
-		json.Unmarshal([]byte(currentCondition.Message), &currentMessage)
-	}
-
-	remove := func(s []string, i string) []string {
-		sort.Strings(s)
-		pos := sort.SearchStrings(s, i)
-		if pos < len(s) {
-			s[pos] = s[len(s)-1]
-			return s[:len(s)-1]
-		}
-		return s
+	// Decode the current message
+	curMap := map[string]string{}
+	if err := json.Unmarshal([]byte(currentCondition.Message), &curMap); err != nil {
+		logger.WithField("message", currentCondition.Message).Warning("Could not decode current message, resetting the inventory status")
 	}
 
-	for _, removeState := range []string{"cloud broken", "config broken", "missing"} {
-		currentMessage[state] = remove(currentMessage[removeState], "")
+	// Replace the entry for this CDC -- but omit it if it's valid
+	if cdcName != "" && cdcState != "" {
+		delete(curMap, cdcName)
+		if cdcState != hivev1.InventoryReasonValid && cdcState != hivev1.InventoryReasonFound {
+			curMap[cdcName] = string(cdcState)
+		}
 	}
-	if add && currentCondition.Status == corev1.ConditionTrue {
-		newList := []string{cdcName}
-		emptyMessage[state] = newList
-		currentMessage = emptyMessage
-	} else if add {
-		for _, removeState := range []string{"cloud broken", "config broken", "missing"} {
-			if state != removeState {
-				currentMessage[state] = remove(currentMessage[removeState], cdcName)
+
+	// Default the condition to "valid"
+	message := ""
+	reason := hivev1.InventoryReasonValid
+
+	if len(curMap) != 0 {
+		for _, c := range curMap {
+			switch {
+			case reason == hivev1.InventoryReasonValid:
+				reason = c
+			case reason != c:
+				reason = hivev1.InventoryReasonInvalid
 			}
 		}
-		sort.Strings(currentMessage[state])
-		pos := sort.SearchStrings(currentMessage[state], cdcName)
-		if pos == len(currentMessage[state]) {
-			currentMessage[state] = append(currentMessage[state], cdcName)
-		}
-	} else if state == "" {
-		for _, removeState := range []string{"cloud broken", "config broken", "missing"} {
-			currentMessage[state] = remove(currentMessage[removeState], cdcName)
+		messageByte, err := json.Marshal(curMap)
+		if err != nil {
+			logger.WithError(err).Error("Could not encode current message")
+			return err
 		}
-		return nil
-	} else {
-		currentMessage[state] = remove(currentMessage[state], cdcName)
+		message = string(messageByte)
 	}
 
 	status := corev1.ConditionTrue
-	reason := "InventoryValid"
-	if (len(currentMessage["cloud broken"]) + len(currentMessage["config broken"]) + len(currentMessage["missing"])) > 0 {
+	if reason != hivev1.InventoryReasonValid {
 		status = corev1.ConditionFalse
-		reason = "Invalid"
-	}
-	messageByte, err := json.Marshal(currentMessage)
-	if err != nil {
-		return errors.Wrap(err, "could not update ClusterPool conditions")
 	}
 
 	conditions, changed := controllerutils.SetClusterPoolConditionWithChangeCheck(
@@ -1153,17 +1179,20 @@ func (r *ReconcileClusterPool) setInventoryValidCondition(pool *hivev1.ClusterPo
 		hivev1.ClusterPoolInventoryValidCondition,
 		status,
 		reason,
-		string(messageByte),
+		message,
 		controllerutils.UpdateConditionIfReasonOrMessageChange,
 	)
 
 	if changed {
 		pool.Status.Conditions
= conditions
-		if err := r.Status().Update(context.TODO(), pool); err != nil {
-			logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterPool conditions")
-			return errors.Wrap(err, "could not update ClusterPool conditions")
+		if update {
+			if err := r.Status().Update(context.TODO(), pool); err != nil {
+				logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterPool conditions")
+				return errors.Wrap(err, "could not update ClusterPool conditions")
+			}
 		}
 	}
+
 	return nil
 }
 
@@ -1274,17 +1303,21 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg
 	}
 }
 
-// INFO: [Fairness](https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#fairness)
-// The function loops over the list of inventory items and picks the first available customization.
-// Failing to apply a customization (in any cluster pool) will cause to change its status to unvailable and a new cluster will be queued.
-func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPool, logger log.FieldLogger) (*hivev1.ClusterDeploymentCustomization, error) {
-	var inventory ClusterDeploymentCustomizations
+// getInventoryCustomization retrieves the available ClusterDeploymentCustomizations
+// and picks one whose last apply succeeded ahead of a broken one, so that a
+// known-broken customization is not reused. Among customizations with the same
+// last apply status, the least recently used one is preferred.
+func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPool, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
+	if pool.Spec.Inventory == nil {
+		return nil
+	}
+	var inventory []hivev1.ClusterDeploymentCustomization
 	for _, entry := range pool.Spec.Inventory {
-		if entry.Kind == hivev1.ClusterDeploymentCustomizationInventoryEntry || entry.Kind == "" {
+		if entry.Kind == hivev1.ClusterDeploymentCustomizationInventoryEntry {
 			cdc := &hivev1.ClusterDeploymentCustomization{}
 			if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: pool.Namespace, Name: entry.Name}, cdc); err != nil {
 				if apierrors.IsNotFound(err) {
-					r.setInventoryValidCondition(pool, true, cdc.Name, "missing", logger)
+					r.updateInventoryValidMessage(pool, cdc.Name, hivev1.InventoryReasonMissing, true, logger)
 				}
 				continue
 			}
@@ -1297,33 +1330,27 @@ func (r *ReconcileClusterPool) getInventoryCustomizatio
 		}
 	}
-	if inventory.Len() > 0 {
-		sort.Sort(inventory)
-		cdc := &inventory[0]
-		conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
-			cdc.Status.Conditions,
-			hivev1.ClusterDeploymentCustomizationAvailableCondition,
-			corev1.ConditionFalse,
-			"Reservation",
-			"Reserving cluster deployment customization",
-			controllerutils.UpdateConditionIfReasonOrMessageChange,
-		)
-		if changed {
-			cdc.Status.Conditions = conds
-			if err := r.Status().Update(context.Background(), cdc); err != nil {
-				if apierrors.IsNotFound(err) {
-					r.setInventoryValidCondition(pool, true, cdc.Name, "missing", logger)
-				}
-				return nil, errors.New("could not update ClusterDeploymentCustomization conditions")
-			}
-		}
-
-		return cdc, nil
+	if len(inventory) > 0 {
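+		// Sketch of the resulting order (statuses assumed for illustration):
+		// entries whose last apply Succeeded come first, oldest LastApplyTime
+		// first, so a BrokenByCloud entry is only picked when nothing else is left.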
+		sort.Slice(
+			inventory,
+			func(i, j int) bool {
+				if inventory[i].Status.LastApplyStatus == inventory[j].Status.LastApplyStatus {
+					return inventory[i].Status.LastApplyTime.Before(&inventory[j].Status.LastApplyTime)
+				}
+				// A successful last apply sorts ahead of any broken status.
+				return inventory[i].Status.LastApplyStatus == hivev1.LastApplySucceeded
+			},
+		)
+		cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: inventory[0].Name}
+		return nil
 	}
-	return nil, errors.New("no customization available")
+	return errors.New("no customization available")
 }
 
+// applyPatches applies JSON patches (RFC 6902) to the install-config (YAML format)
 func applyPatches(patches []hivev1.PatchEntity, data string, logger log.FieldLogger) (string, error) {
 	targetJson, err := yaml.YAMLToJSON([]byte(data))
 	if err != nil {
@@ -1354,36 +1381,12 @@ func applyPatches(patches []hivev1.PatchEntity, data string, logger log.FieldLog
 	return string(patchedYaml), nil
 }
 
-func isInstallConfigSecret(obj interface{}) bool {
+func isInstallConfigSecret(obj interface{}) *corev1.Secret {
 	if secret, ok := obj.(*corev1.Secret); ok {
 		_, ok := secret.StringData["install-config.yaml"]
 		if ok {
-			return true
+			return secret
 		}
 	}
-	return false
-}
-
-// ClusterDeploymentCustomizations is a list ClusterDeploymentCustomization objects
-type ClusterDeploymentCustomizations []hivev1.ClusterDeploymentCustomization
-
-// Len is the number of elements in the collection.
-func (c ClusterDeploymentCustomizations) Len() int { return len(c) }
-
-// Less reports whether the element with index i should sort before the element with index j.
-func (c ClusterDeploymentCustomizations) Less(i, j int) bool {
-	status_a := c[i].Status.LastApplyStatus
-	status_b := c[j].Status.LastApplyStatus
-	time_a := c[i].Status.LastApplyTime
-	time_b := c[j].Status.LastApplyTime
-	if status_a == status_b {
-		return time_a.Before(&time_b)
-	}
-	if status_a == "success" {
-		return false
-	}
-	return true
+	return nil
 }
-
-// Swap swaps the elements with indexes i and j.
-func (c ClusterDeploymentCustomizations) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go
index 4cbd53e3042..52c578db082 100644
--- a/pkg/controller/clusterpool/clusterpool_controller_test.go
+++ b/pkg/controller/clusterpool/clusterpool_controller_test.go
@@ -80,7 +80,7 @@ func TestReconcileClusterPool(t *testing.T) {
 		}),
 	)
 
-	inventoryPoolVersion := "17d682718ef4859e"
+	inventoryPoolVersion := "06983eaafac7f695"
 	inventroyPoolBuilder := initializedPoolBuilder.Options(
 		testcp.WithInventory([]hivev1.InventoryEntry{
 			{
@@ -201,7 +201,7 @@ func TestReconcileClusterPool(t *testing.T) {
 			expectPoolVersionChanged: true,
 		},
 		{
-			name: "poolVersion doens't changes with existing Inventory",
+			name: "poolVersion doesn't change with existing Inventory",
 			existing: []runtime.Object{
 				inventroyPoolBuilder.Build(testcp.WithInventory(
 					[]hivev1.InventoryEntry{
@@ -216,7 +216,7 @@ func TestReconcileClusterPool(t *testing.T) {
 			expectPoolVersionChanged: false,
 		},
 		{
-			name: "poolVersion doens't changes with existing Inventory 2",
+			name: "poolVersion doesn't change with existing Inventory 2",
 			existing: []runtime.Object{
 				inventroyPoolBuilder.Build(),
 			},
 			expectPoolVersionChanged: false,
 		},
 		{
-			name: "customized clusterpool will creates a cluster",
+			name: "customized clusterpool will create a cluster",
 			existing: []runtime.Object{
 				inventroyPoolBuilder.Build(testcp.WithSize(1)),
 			},
 			expectedTotalClusters: 1,
 			expectedObservedSize: 0,
 			expectedObservedReady: 0,
-			expectedInventoryVaildStatus: corev1.ConditionUnknown,
+			expectedInventoryVaildStatus: corev1.ConditionTrue,
}, { name: "customized clusterpool inventory valid", diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 8751fa21f76..b57adfe1916 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -228,6 +228,19 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log logger.Infof("Cluster %s is broken due to ProvisionStopped", cd.Name) return true } + if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil { + customizationExists := false + cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name + for _, entry := range pool.Spec.Inventory { + if cdcName == entry.Name { + customizationExists = true + } + } + if !customizationExists { + logger.Infof("Cluster %s is broken due to removed customization %s", cd.Name, cdcName) + return true + } + } //// // Check for resume timeout @@ -263,19 +276,6 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log logger.Infof("Cluster %s is broken due to resume timeout", cd.Name) return true } - if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil { - customizationExists := false - cdcName := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef.Name - for _, entry := range pool.Spec.Inventory { - if cdcName == entry.Name { - customizationExists = true - } - } - if !customizationExists { - logger.Infof("Cluster %s is broken due to removed customization %s", cd.Name, cdcName) - return true - } - } return false } diff --git a/pkg/controller/hibernation/hibernation_controller.go b/pkg/controller/hibernation/hibernation_controller.go index de8b235ba97..f3eb9190041 100644 --- a/pkg/controller/hibernation/hibernation_controller.go +++ b/pkg/controller/hibernation/hibernation_controller.go @@ -222,7 +222,9 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile // set hibernating condition to false for unsupported clouds changed := r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonUnsupported, msg, corev1.ConditionFalse, cdLog) - if changed { + rChanged := r.setCDCondition(cd, hivev1.ClusterReadyCondition, hivev1.ReadyReasonRunning, clusterRunningMsg, + corev1.ConditionTrue, cdLog) + if changed || rChanged { cd.Status.PowerState = hivev1.ClusterPowerStateRunning return reconcile.Result{}, r.updateClusterDeploymentStatus(cd, cdLog) } @@ -234,7 +236,6 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile } isFakeCluster := controllerutils.IsFakeCluster(cd) - isOnpremCustomized := controllerutils.IsOnpremCustomized(cd) clusterSync := &hiveintv1alpha1.ClusterSync{} if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}, clusterSync); err != nil { @@ -373,15 +374,9 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile return r.checkClusterStopped(cd, false, cdLog) } // If we get here, we're not supposed to be hibernating - if isFakeCluster || isOnpremCustomized { - changed := false - if supported, msg := r.hibernationSupported(cd); !supported { - changed = r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonUnsupported, - msg, corev1.ConditionFalse, cdLog) - } else { - changed = r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonResumingOrRunning, - clusterResumingOrRunningMsg, corev1.ConditionFalse, cdLog) - } + if isFakeCluster 
{ + changed := r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonResumingOrRunning, + clusterResumingOrRunningMsg, corev1.ConditionFalse, cdLog) rChanged := r.setCDCondition(cd, hivev1.ClusterReadyCondition, hivev1.ReadyReasonRunning, clusterRunningMsg, corev1.ConditionTrue, cdLog) if changed || rChanged { @@ -915,6 +910,9 @@ func shouldStopMachines(cd *hivev1.ClusterDeployment, hibernatingCondition *hive // shouldStartMachines decides if machines should be started func shouldStartMachines(cd *hivev1.ClusterDeployment, hibernatingCondition *hivev1.ClusterDeploymentCondition, readyCondition *hivev1.ClusterDeploymentCondition) bool { + if hibernatingCondition.Reason == hivev1.HibernatingReasonUnsupported { + return true + } if cd.Spec.PowerState == hivev1.ClusterPowerStateHibernating { return false } diff --git a/pkg/controller/hibernation/hibernation_controller_test.go b/pkg/controller/hibernation/hibernation_controller_test.go index be0fe207a5c..c9910f25520 100644 --- a/pkg/controller/hibernation/hibernation_controller_test.go +++ b/pkg/controller/hibernation/hibernation_controller_test.go @@ -119,14 +119,7 @@ func TestReconcile(t *testing.T) { Message: "Unsupported version, need version 4.4.8 or greater"})).Build(), cs: csBuilder.Build(), setupActuator: func(actuator *mock.MockHibernationActuator) { - actuator.EXPECT().MachinesRunning(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(true, nil, nil) - }, - setupRemote: func(builder *remoteclientmock.MockBuilder) { - objs := []runtime.Object{} - objs = append(objs, readyNodes()...) - objs = append(objs, readyClusterOperators()...) - c := fake.NewFakeClientWithScheme(scheme, objs...) - builder.EXPECT().Build().Times(1).Return(c, nil) + actuator.EXPECT().CanHandle(gomock.Any()).AnyTimes().Return(false) }, validate: func(t *testing.T, cd *hivev1.ClusterDeployment) { cond, runCond := getHibernatingAndRunningConditions(cd) @@ -293,22 +286,7 @@ func TestReconcile(t *testing.T) { assert.Equal(t, hivev1.ReadyReasonStoppingOrHibernating, runCond.Reason) }, }, - { - name: "customized CD will not hibernate", - cd: cdBuilder.Options(o.customized, o.shouldRun).Build(), - // The clustersync controller creates a ClusterSync even when there are no syncsets - cs: csBuilder.Build(), - validate: func(t *testing.T, cd *hivev1.ClusterDeployment) { - cond, runCond := getHibernatingAndRunningConditions(cd) - require.NotNil(t, cond) - assert.Equal(t, corev1.ConditionFalse, cond.Status) - assert.Equal(t, hivev1.HibernatingReasonResumingOrRunning, cond.Reason) - assert.Equal(t, hivev1.ClusterPowerStateRunning, cd.Status.PowerState) - require.NotNil(t, runCond) - assert.Equal(t, corev1.ConditionTrue, runCond.Status) - assert.Equal(t, hivev1.ReadyReasonRunning, runCond.Reason) - }, - }, + { name: "start hibernating, syncsets not applied", cd: cdBuilder.Options(o.shouldHibernate, testcd.InstalledTimestamp(time.Now())).Build(), @@ -960,10 +938,10 @@ func TestReconcile(t *testing.T) { t.Run(test.name, func(t *testing.T) { ctrl := gomock.NewController(t) mockActuator := mock.NewMockHibernationActuator(ctrl) - mockActuator.EXPECT().CanHandle(gomock.Any()).AnyTimes().Return(true) if test.setupActuator != nil { test.setupActuator(mockActuator) } + mockActuator.EXPECT().CanHandle(gomock.Any()).AnyTimes().Return(true) mockBuilder := remoteclientmock.NewMockBuilder(ctrl) if test.setupRemote != nil { test.setupRemote(mockBuilder) @@ -1283,7 +1261,7 @@ type clusterDeploymentOptions struct{} func (*clusterDeploymentOptions) customized(cd 
*hivev1.ClusterDeployment) {
 	cd.Spec.Platform.OpenStack = &hivev1openstack.Platform{}
 	cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{
-		ClusterDeploymentCustomizationRef: &corev1.LocalObjectReference{Name: "cdc"},
+		CustomizationRef: &corev1.LocalObjectReference{Name: "cdc"},
 	}
 }
 
diff --git a/pkg/test/clusterdeployment/clusterdeployment.go b/pkg/test/clusterdeployment/clusterdeployment.go
index e2bddcb3dca..13764fbf7d7 100644
--- a/pkg/test/clusterdeployment/clusterdeployment.go
+++ b/pkg/test/clusterdeployment/clusterdeployment.go
@@ -247,6 +247,6 @@ func WithClusterMetadata(clusterMetadata *hivev1.ClusterMetadata) Option {
 
 func WithClusterDeploymentCustomizationReference(cdcName string) Option {
 	return func(clusterDeployment *hivev1.ClusterDeployment) {
-		clusterDeployment.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef = &corev1.LocalObjectReference{Name: cdcName}
+		clusterDeployment.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcName}
 	}
 }
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go
index a8df72decb5..e842e7d639a 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go
@@ -280,8 +280,10 @@ type ClusterPoolReference struct {
 	// ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for
 	// ClusterDeployments belonging to ClusterPools.
 	ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"`
-	// ClusterDeploymentCustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment
-	ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization"`
+	// CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment.
+	// The Customization exists in the ClusterPool namespace.
+	// +optional
+	CustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"`
 }
 
 // ClusterMetadata contains metadata information about the installed cluster.
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
index cf738bc9a59..86917dbf0a8 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -5,6 +5,27 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+// LastApplyStatusType indicates the status of the customization on the last
+// applied cluster deployment. This is needed for the inventory sorting process,
+// to avoid reusing the same broken customization.
+type LastApplyStatusType string
+
+const (
+	// LastApplySucceeded indicates that the customization
+	// worked properly on the last applied cluster deployment
+	LastApplySucceeded LastApplyStatusType = "Succeeded"
+	// LastApplyBrokenSyntax indicates that Hive failed to apply
+	// customization patches on install-config. More detailes would be found in
+	// Valid condition message.
+	LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax"
+	// LastApplyBrokenCloud indicates that cluser deployment provision has failed
+	// when used this customization. More detailes would be found in the Valid condition message.
+ LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud" + // LastApplyInstallationPending indicates that the customization patches have + // been successfully applied but provisioning is not completed yet. + LastApplyInstallationPending LastApplyStatusType = "InstallationPending" +) + // +genclient // +genclient:noStatus // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -23,31 +44,36 @@ type ClusterDeploymentCustomization struct { // ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization type ClusterDeploymentCustomizationSpec struct { - // TODO: documentation + // InstallConfigPatches is a list of patches to be applied to the install-config InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` } -// TODO: documentation +// PatchEntity represent a json patch (RFC 6902) to be applied to the install-config type PatchEntity struct { + // Op is the operation to perform: add, remove, replace, move, copy, test // +required Op string `json:"op"` + // Path is the json path to the value to be modified // +required Path string `json:"path"` + // Value is the value to be used in the operation // +required Value string `json:"value"` } // ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization type ClusterDeploymentCustomizationStatus struct { - // TODO: documentation + // ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on // +optional - ClusterDeploymentRef *corev1.ObjectReference `json:"clusterDeploymentRef,omitempty"` + ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"` + // LastApplyTime indicates the time when the customization was applied on a cluster deployment // +optional LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"` + // LastApplyStatus indicates the customization status in the last applied cluster deployment // +optional - LastApplyStatus string `json:"lastApplyStatus,omitempty"` + LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"` // Conditions includes more detailed status for the cluster deployment customization status. // +optional @@ -77,9 +103,8 @@ type ClusterDeploymentCustomizationCondition struct { type ClusterDeploymentCustomizationConditionType string const ( - // TODO: add more types - // TODO: shorter name? 
 	ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available"
+	ClusterDeploymentCustomizationValid ClusterDeploymentCustomizationConditionType = "Valid"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go
index c1bafa09a0f..c8673f9cc87 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go
@@ -93,17 +93,10 @@ type ClusterPoolSpec struct {
 	// +optional
 	HibernationConfig *HibernationConfig `json:"hibernationConfig"`
 
-	// Inventory maintains a list entries consumed by the clusterpool
-	// to customize the default the cluster deployment
+	// Inventory maintains a list of entries consumed by the ClusterPool
+	// to customize the default ClusterDeployment
 	// +optional
 	Inventory []InventoryEntry `json:"inventory,omitempty"`
-
-	// InventoryAttempts is the number of attempts to provision a ClusterDeployment with a given inventory entry.
-	// On a successful provision, the inventory entry attempts status is updated to this value.
-	// Negative InventoryAttempts means unlimited attempts, and recommended only for debugging purposes.
-	// Default number of InventoryAttempts is 5.
-	// +optional
-	InventoryAttempts *int32 `json:"inventoryAttempts,omitempty"`
 }
 
 type HibernationConfig struct {
@@ -131,7 +124,7 @@ const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "Cluster
 // InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment
 type InventoryEntry struct {
 	// Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value.
-	// +optional
+	// +kubebuilder:default=ClusterDeploymentCustomization
 	Kind InventoryEntryKind `json:"kind,omitempty"`
 
 	// Name is the name of the referenced resource.
 	// +required
@@ -229,6 +222,23 @@ const (
 	ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid"
 )
 
+// Inventory (in)valid reasons
+const (
+	// InventoryReasonValid is used when all ClusterDeploymentCustomizations are
+	// available and the ClusterDeployments that used them installed successfully
+	InventoryReasonValid = "Valid"
+	// InventoryReasonMissing is used when one or more ClusterDeploymentCustomizations are missing
+	InventoryReasonMissing = "Missing"
+	// InventoryReasonFound is used to cancel a missing ClusterDeploymentCustomization
+	InventoryReasonFound = "Found"
+	// InventoryReasonBrokenByCloud is used when one or more ClusterDeployment installations failed
+	InventoryReasonBrokenByCloud = "BrokenByCloud"
+	// InvenotryReasonBrokenBySyntax is used when patching failed for one or more ClusterDeploymentCustomizations
+	InvenotryReasonBrokenBySyntax = "BrokenBySyntax"
+	// InventoryReasonInvalid is used when the inventory is invalid for more than one reason
+	InventoryReasonInvalid = "Invalid"
+)
+
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go
index a33a463adbb..34e04dd115c 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go
@@ -781,7 +781,7 @@ func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploym
 	*out = *in
 	if in.ClusterDeploymentRef != nil {
 		in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef
-		*out = new(corev1.ObjectReference)
+		*out = new(corev1.LocalObjectReference)
 		**out = **in
 	}
 	if in.Conditions != nil {
@@ -1469,8 +1469,8 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) {
 		in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp
 		*out = (*in).DeepCopy()
 	}
-	if in.ClusterDeploymentCustomizationRef != nil {
-		in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef
+	if in.CustomizationRef != nil {
+		in, out := &in.CustomizationRef, &out.CustomizationRef
 		*out = new(corev1.LocalObjectReference)
 		**out = **in
 	}

From aad3a770f22273086a14f56978e722ba336fb061 Mon Sep 17 00:00:00 2001
From: Alexander Braverman Masis
Date: Mon, 25 Apr 2022 16:42:01 +0300
Subject: [PATCH 03/27] custom-resource-status make vendor

added more stuff
---
 apis/go.mod                                   |   1 +
 apis/go.sum                                   |  46 +++-
 .../clusterdeploymentcustomization_types.go   |  43 ++--
 apis/hive/v1/zz_generated.deepcopy.go         |  22 +-
 .../openshift/custom-resource-status/LICENSE  | 201 ++++++++++++++++++
 .../conditions/v1/conditions.go               | 104 +++++++++
 .../conditions/v1/doc.go                      |   9 +
 .../conditions/v1/types.go                    |  51 +++++
 .../conditions/v1/zz_generated.deepcopy.go    |  23 ++
 apis/vendor/modules.txt                       |   3 +
 ...ft.io_clusterdeploymentcustomizations.yaml |  20 +-
 go.mod                                        |   1 +
 go.sum                                        |  18 ++
 hack/app-sre/saas-template.yaml               |  21 +-
 .../clusterdeployment_controller.go           |  27 ++-
 .../clusterpool/clusterpool_controller.go     |  50 ++---
 pkg/controller/utils/conditions.go            |  47 +---
 .../openshift/custom-resource-status/LICENSE  | 201 ++++++++++++++++++
 .../conditions/v1/conditions.go               | 104 +++++++++
 .../conditions/v1/doc.go                      |   9 +
 .../conditions/v1/types.go                    |  51 +++++
 .../conditions/v1/zz_generated.deepcopy.go    |  23 ++
 .../clusterdeploymentcustomization_types.go   |  43 ++--
 .../apis/hive/v1/zz_generated.deepcopy.go     |  22 +-
 vendor/modules.txt                            |   3 +
 25
files changed, 927 insertions(+), 216 deletions(-) create mode 100644 apis/vendor/github.com/openshift/custom-resource-status/LICENSE create mode 100644 apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go create mode 100644 apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go create mode 100644 apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go create mode 100644 apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/custom-resource-status/LICENSE create mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go create mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go create mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go create mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go diff --git a/apis/go.mod b/apis/go.mod index 8deb5f4524c..b7db25e0310 100644 --- a/apis/go.mod +++ b/apis/go.mod @@ -4,6 +4,7 @@ go 1.18 require ( github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 + github.com/openshift/custom-resource-status v1.1.2 k8s.io/api v0.24.1 k8s.io/apimachinery v0.24.1 ) diff --git a/apis/go.sum b/apis/go.sum index f3291b13514..6d6c0514b76 100644 --- a/apis/go.sum +++ b/apis/go.sum @@ -6,6 +6,9 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= @@ -20,6 +23,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -36,8 +40,11 @@ github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -65,11 +72,15 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -85,6 +96,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -98,16 +110,23 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= 
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 h1:bkBOsI/Yd+cBT+/aXkbbNo+imvq4VKRusoCluIGOBBg= github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= +github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -126,6 +145,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -139,6 +159,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -153,7 +174,11 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -171,14 +196,19 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -187,6 +217,7 @@ golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -199,8 +230,11 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -251,27 +285,37 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 
v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go index 86917dbf0a8..af4942841e3 100644 --- a/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -1,6 +1,7 @@ package v1 import ( + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -15,11 +16,11 @@ const ( // worked properly on the last applied cluster deployment LastApplySucceeded LastApplyStatusType = "Succeeded" // LastApplyBrokenSyntax indicates that Hive failed to apply - // customization patches on install-config. More detailes would be found in + // customization patches on install-config. More details would be found in // Valid condition message. LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax" - // LastApplyBrokenCloud indicates that cluser deployment provision has failed - // when used this customization. More detailes would be found in the Valid condition message. + // LastApplyBrokenCloud indicates that cluster deployment provision has failed + // when used this customization. More details would be found in the Valid condition message. LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud" // LastApplyInstallationPending indicates that the customization patches have // been successfully applied but provisioning is not completed yet. 
@@ -75,44 +76,24 @@ type ClusterDeploymentCustomizationStatus struct {
 	// +optional
 	LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"`
 
-	// Conditions includes more detailed status for the cluster deployment customization status.
+	// Conditions describes the state of the customization's reconciliation.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
 	// +optional
-	Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"`
+	Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
 }
 
-type ClusterDeploymentCustomizationCondition struct {
-	// Type is the type of the condition.
-	Type ClusterDeploymentCustomizationConditionType `json:"type"`
-	// Status is the status of the condition.
-	Status corev1.ConditionStatus `json:"status"`
-	// LastProbeTime is the last time we probed the condition.
-	// +optional
-	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
-	// LastTransitionTime is the last time the condition transitioned from one status to another.
-	// +optional
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
-	// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
-	// +optional
-	Reason string `json:"reason,omitempty"`
-	// Message is a human-readable message indicating details about last transition.
-	// +optional
-	Message string `json:"message,omitempty"`
-}
-
-// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type
-type ClusterDeploymentCustomizationConditionType string
-
 const (
-	ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available"
-	ClusterDeploymentCustomizationValid ClusterDeploymentCustomizationConditionType = "Valid"
+	ClusterDeploymentCustomizationValid conditionsv1.ConditionType = "Valid"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// ClusterDeploymentCustomizationLis contains the list of ClusterDeploymentCustomization
+// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations
 type ClusterDeploymentCustomizationList struct {
 	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
+	metav1.ListMeta `json:"metadata,omitempty"`
 	Items []ClusterDeploymentCustomization `json:"items"`
 }
diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go
index 34e04dd115c..a41584ab13e 100644
--- a/apis/hive/v1/zz_generated.deepcopy.go
+++ b/apis/hive/v1/zz_generated.deepcopy.go
@@ -7,6 +7,7 @@ package v1
 
 import (
 	configv1 "github.com/openshift/api/config/v1"
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
 	agent "github.com/openshift/hive/apis/hive/v1/agent"
 	alibabacloud "github.com/openshift/hive/apis/hive/v1/alibabacloud"
 	aws "github.com/openshift/hive/apis/hive/v1/aws"
@@ -704,24 +705,6 @@ func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object {
 	return nil
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. -func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { - if in == nil { - return nil - } - out := new(ClusterDeploymentCustomizationCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { *out = *in @@ -784,9 +767,10 @@ func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploym *out = new(corev1.LocalObjectReference) **out = **in } + in.LastApplyTime.DeepCopyInto(&out.LastApplyTime) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + *out = make([]conditionsv1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/apis/vendor/github.com/openshift/custom-resource-status/LICENSE b/apis/vendor/github.com/openshift/custom-resource-status/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go new file mode 100644 index 00000000000..bbeee804a2b --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go @@ -0,0 +1,104 @@ +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. 
+func SetStatusCondition(conditions *[]Condition, newCondition Condition) {
+	if conditions == nil {
+		conditions = &[]Condition{}
+	}
+	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		newCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+	existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
+}
+
+// SetStatusConditionNoHeartbeat sets the corresponding condition in conditions to newCondition
+// without setting lastHeartbeatTime.
+func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) {
+	if conditions == nil {
+		conditions = &[]Condition{}
+	}
+	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+// RemoveStatusCondition removes the corresponding conditionType from conditions.
+func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) {
+	if conditions == nil {
+		return
+	}
+	newConditions := []Condition{}
+	for _, condition := range *conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+
+	*conditions = newConditions
+}
+
+// FindStatusCondition finds the conditionType in conditions.
+func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+// IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue`
+func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue)
+}
+
+// IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse`
+func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionFalse)
+}
+
+// IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown`
+func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionUnknown)
+}
+
+// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status.
+func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go new file mode 100644 index 00000000000..b657efeaa65 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1 provides version v1 of the types and functions necessary to +// manage and inspect a slice of conditions. It is opinionated in the +// condition types provided but leaves it to the user to define additional +// types as necessary. +package v1 diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go new file mode 100644 index 00000000000..950678fb970 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go @@ -0,0 +1,51 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Condition represents the state of the operator's +// reconciliation functionality. +// +k8s:deepcopy-gen=true +type Condition struct { + Type ConditionType `json:"type" description:"type of condition ie. Available|Progressing|Degraded."` + + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` + + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"` + + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"` +} + +// ConditionType is the state of the operator's reconciliation functionality. +type ConditionType string + +const ( + // ConditionAvailable indicates that the resources maintained by the operator, + // is functional and available in the cluster. + ConditionAvailable ConditionType = "Available" + + // ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the + // operator + ConditionProgressing ConditionType = "Progressing" + + // ConditionDegraded indicates that the resources maintained by the operator are not functioning completely. + // An example of a degraded state would be if not all pods in a deployment were running. + // It may still be available, but it is degraded + ConditionDegraded ConditionType = "Degraded" + + // ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade. + // When `False`, the resources maintained by the operator should not be upgraded and the + // message field should contain a human readable description of what the administrator should do to + // allow the operator to successfully update the resources maintained by the operator. 
+ ConditionUpgradeable ConditionType = "Upgradeable" +) diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..bbbbf863d13 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go @@ -0,0 +1,23 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} diff --git a/apis/vendor/modules.txt b/apis/vendor/modules.txt index bff48677e9c..4e924cf9b40 100644 --- a/apis/vendor/modules.txt +++ b/apis/vendor/modules.txt @@ -21,6 +21,9 @@ github.com/modern-go/reflect2 # github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 ## explicit; go 1.16 github.com/openshift/api/config/v1 +# github.com/openshift/custom-resource-status v1.1.2 +## explicit; go 1.12 +github.com/openshift/custom-resource-status/conditions/v1 # golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd ## explicit; go 1.17 golang.org/x/net/http/httpguts diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml index 2e15ea28f51..cadfca3bc45 100644 --- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -74,32 +74,28 @@ spec: type: string type: object conditions: - description: Conditions includes more detailed status for the cluster - deployment customization status. + description: Conditions describes the state of the operator's reconciliation + functionality. Conditions is a list of conditions related to operator + reconciliation items: + description: Condition represents the state of the operator's reconciliation + functionality. properties: - lastProbeTime: - description: LastProbeTime is the last time we probed the condition. + lastHeartbeatTime: format: date-time type: string lastTransitionTime: - description: LastTransitionTime is the last time the condition - transitioned from one status to another. format: date-time type: string message: - description: Message is a human-readable message indicating - details about last transition. type: string reason: - description: Reason is a unique, one-word, CamelCase reason - for the condition's last transition. type: string status: - description: Status is the status of the condition. type: string type: - description: Type is the type of the condition. + description: ConditionType is the state of the operator's reconciliation + functionality. 
type: string required: - status diff --git a/go.mod b/go.mod index 4718e434e4b..96f91285a59 100644 --- a/go.mod +++ b/go.mod @@ -38,6 +38,7 @@ require ( github.com/openshift/cluster-api-provider-ibmcloud v0.0.1-0.20220201105455-8014e5e894b0 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519 github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551 + github.com/openshift/custom-resource-status v1.1.2 github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 github.com/openshift/hive/apis v0.0.0 github.com/openshift/installer v0.9.0-master.0.20220711145509-cdb9627de7ef diff --git a/go.sum b/go.sum index 64c70818b04..087cdef92d3 100644 --- a/go.sum +++ b/go.sum @@ -1046,6 +1046,8 @@ github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850d github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519/go.mod h1:C7unCUThP8eqT4xQfbvg3oIDn2S9TYtb0wbBoH/SR2U= github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551 h1:nGa6igwzG7smZOACUsovgf9XG8vT96Zdyc4H6r2rqS0= github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551/go.mod h1:72ieWchfTx9U7UbQO47vhSXBoCi2IJGZhXoCezan4EM= +github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= +github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 h1:y47BAJFepK8Xls1c+quIOyc46OXiT9LRiqGVjIaMlSA= github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480/go.mod h1:OAHL5WnZphlhVEf5fTdeGLvNwMu1B2zCWpmxJpCA35o= github.com/openshift/installer v0.9.0-master.0.20220711145509-cdb9627de7ef h1:y3d9tfJqoKLsUwOJHi3iPBYmJe4Ukj8n19SkUsZbVUA= @@ -1309,6 +1311,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= @@ -1470,6 +1473,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1531,9 +1535,11 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1639,6 +1645,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1768,6 +1775,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 h1:hI3jKY4Hpf63ns040onEbB3dAkR/H/P83hw1TG8dD3Y= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1883,6 +1892,7 @@ k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= k8s.io/api v0.22.0-rc.0/go.mod h1:EUcKB6RvpW74HMRUSSNwpUzrIHBdGT1FeAvOV+txic0= k8s.io/api v0.22.0/go.mod h1:0AoXXqst47OI/L0oGKq9DG61dvGRPXs7X4/B7KyjBCU= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= @@ -1905,6 +1915,7 @@ k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCF 
k8s.io/apimachinery v0.22.0-rc.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= @@ -1936,6 +1947,7 @@ k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6g k8s.io/code-generator v0.22.0-rc.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.22.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/code-generator v0.24.1 h1:zS+dvmUNaOcvsQ4faV9hXNjsKG9/pQaLnts1Wma4RM8= k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= @@ -1969,6 +1981,8 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= @@ -1981,6 +1995,8 @@ k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kubectl v0.22.0/go.mod h1:eeuP92uZbVL2UnOaf0nj9OjtI0hi/nekHU+0isURer0= @@ -1997,6 +2013,7 @@ k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 
h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -2032,6 +2049,7 @@ sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aea sigs.k8s.io/controller-tools v0.6.3-0.20210916130746-94401651a6c3/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8= sigs.k8s.io/controller-tools v0.9.0 h1:b/vSEPpA8hiMiyzDfLbZdCn3hoAcy3/868OHhYtHY9w= sigs.k8s.io/controller-tools v0.9.0/go.mod h1:NUkn8FTV3Sad3wWpSK7dt/145qfuQ8CKJV6j4jHC5rM= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc= diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index ba7c4af6ddd..feb41a5ebdf 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -328,33 +328,28 @@ objects: type: string type: object conditions: - description: Conditions includes more detailed status for the cluster - deployment customization status. + description: Conditions describes the state of the operator's reconciliation + functionality. Conditions is a list of conditions related to operator + reconciliation items: + description: Condition represents the state of the operator's + reconciliation functionality. properties: - lastProbeTime: - description: LastProbeTime is the last time we probed the - condition. + lastHeartbeatTime: format: date-time type: string lastTransitionTime: - description: LastTransitionTime is the last time the condition - transitioned from one status to another. format: date-time type: string message: - description: Message is a human-readable message indicating - details about last transition. type: string reason: - description: Reason is a unique, one-word, CamelCase reason - for the condition's last transition. type: string status: - description: Status is the status of the condition. type: string type: - description: Type is the type of the condition. + description: ConditionType is the state of the operator's + reconciliation functionality. 
                    type: string
                required:
                - status
diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
index 57aea7ed1d7..4e25ff12e80 100644
--- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go
+++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
@@ -34,6 +34,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
 	librarygocontroller "github.com/openshift/library-go/pkg/controller"
 	"github.com/openshift/library-go/pkg/manifest"
 	"github.com/openshift/library-go/pkg/verify"
@@ -1453,21 +1454,17 @@ func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDepl
 		return err
 	}
 
-	conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition(
-		cdc.Status.Conditions,
-		hivev1.ClusterDeploymentCustomizationAvailableCondition,
-		corev1.ConditionTrue,
-		"ClusterDeploymentCustomizationAvailable",
-		"Cluster Deployment Customization is available",
-		controllerutils.UpdateConditionIfReasonOrMessageChange,
-	)
-	if changed {
-		cdc.Status.Conditions = conds
-		cdc.Status.ClusterDeploymentRef = nil
-		if err := r.Status().Update(context.Background(), cdc); err != nil {
-			cdLog.WithError(err).WithField("Customization", customizationRef.Name).Error("failed to update ClusterDeploymentCustomizationAvailable condition")
-			return err
-		}
+	conditionsv1.SetStatusCondition(&cdc.Status.Conditions, conditionsv1.Condition{
+		Type:    conditionsv1.ConditionAvailable,
+		Status:  corev1.ConditionTrue,
+		Reason:  "ClusterDeploymentCustomizationAvailable",
+		Message: "Cluster Deployment Customization is available",
+	})
+
+	cdc.Status.ClusterDeploymentRef = nil
+	if err := r.Status().Update(context.Background(), cdc); err != nil {
+		cdLog.WithError(err).WithField("Customization", customizationRef.Name).Error("failed to update ClusterDeploymentCustomizationAvailable condition")
+		return err
 	}
 
 	controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease)
diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go
index 92bbecf8f99..a98b01fe6c4 100644
--- a/pkg/controller/clusterpool/clusterpool_controller.go
+++ b/pkg/controller/clusterpool/clusterpool_controller.go
@@ -30,6 +30,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
 	apihelpers "github.com/openshift/hive/apis/helpers"
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
 	"github.com/openshift/hive/pkg/clusterresource"
@@ -492,7 +493,7 @@ func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*h
 	// 2.
CDC is reserved but it doesn't have a reference to a ClusterDeployment currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( cdc.Status.Conditions, - hivev1.ClusterDeploymentCustomizationAvailableCondition, + conditionsv1.ConditionAvailable, ) if cdc.Status.ClusterDeploymentRef != nil { cd := &hivev1.ClusterDeployment{} @@ -530,21 +531,16 @@ func (r *ReconcileClusterPool) setCustomizationAvailabilityCondition(cdc *hivev1 reason = "Reserved" } - conditions, changed := controllerutils.SetClusterDeploymentCustomizationCondition( - cdc.Status.Conditions, - hivev1.ClusterDeploymentCustomizationAvailableCondition, - status, - reason, - message, - controllerutils.UpdateConditionIfReasonOrMessageChange, - ) + conditionsv1.SetStatusCondition(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: status, + Reason: reason, + Message: message, + }) - if changed { - cdc.Status.Conditions = conditions - if err := r.Status().Update(context.TODO(), cdc); err != nil { - logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") - return errors.Wrap(err, "could not update ClusterDeploymentCustomization conditions") - } + if err := r.Status().Update(context.TODO(), cdc); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") + return errors.Wrap(err, "could not update ClusterDeploymentCustomization conditions") } return nil @@ -910,20 +906,14 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h // Reserving ClusterDeploymentCustomization cdc.Status.LastApplyTime = metav1.Now() - conds, changed := controllerutils.SetClusterDeploymentCustomizationCondition( - cdc.Status.Conditions, - hivev1.ClusterDeploymentCustomizationAvailableCondition, - corev1.ConditionFalse, - "reserved", - "Reserved", - controllerutils.UpdateConditionIfReasonOrMessageChange, - ) - - if changed { - cdc.Status.Conditions = conds - } + conditionsv1.SetStatusCondition(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Reserved", + Message: "Reserved", + }) - if updateErr := r.Status().Update(context.Background(), cdc); updateErr != nil { + if err := r.Status().Update(context.Background(), cdc); err != nil { if apierrors.IsNotFound(err) { r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonMissing, true, logger) } @@ -1319,11 +1309,11 @@ func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPoo if apierrors.IsNotFound(err) { r.updateInventoryValidMessage(pool, cdc.Name, hivev1.InventoryReasonMissing, true, logger) } - continue + return err } currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( cdc.Status.Conditions, - hivev1.ClusterDeploymentCustomizationAvailableCondition, + conditionsv1.ConditionAvailable, ) if currentAvailability == nil || currentAvailability.Status == corev1.ConditionTrue { inventory = append(inventory, *cdc) diff --git a/pkg/controller/utils/conditions.go b/pkg/controller/utils/conditions.go index 405d734442a..8b9943c2c72 100644 --- a/pkg/controller/utils/conditions.go +++ b/pkg/controller/utils/conditions.go @@ -6,6 +6,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" ) @@ 
-542,51 +543,7 @@ func SetMachinePoolCondition( return newConditions } -// SetClusterDeploymentCustomizationCondition sets a condition on a ClusterDeploymentCustomization resource's status -func SetClusterDeploymentCustomizationCondition( - conditions []hivev1.ClusterDeploymentCustomizationCondition, - conditionType hivev1.ClusterDeploymentCustomizationConditionType, - status corev1.ConditionStatus, - reason string, - message string, - updateConditionCheck UpdateConditionCheck, -) ([]hivev1.ClusterDeploymentCustomizationCondition, bool) { - now := metav1.Now() - changed := false - existingCondition := FindClusterDeploymentCustomizationCondition(conditions, conditionType) - if existingCondition == nil { - changed = true - conditions = append( - conditions, - hivev1.ClusterDeploymentCustomizationCondition{ - Type: conditionType, - Status: status, - Reason: reason, - Message: message, - LastTransitionTime: now, - LastProbeTime: now, - }, - ) - } else { - if shouldUpdateCondition( - existingCondition.Status, existingCondition.Reason, existingCondition.Message, - status, reason, message, - updateConditionCheck, - ) { - if existingCondition.Status != status { - existingCondition.LastTransitionTime = now - } - existingCondition.Status = status - existingCondition.Reason = reason - existingCondition.Message = message - existingCondition.LastProbeTime = now - changed = true - } - } - return conditions, changed -} - -func FindClusterDeploymentCustomizationCondition(conditions []hivev1.ClusterDeploymentCustomizationCondition, conditionType hivev1.ClusterDeploymentCustomizationConditionType) *hivev1.ClusterDeploymentCustomizationCondition { +func FindClusterDeploymentCustomizationCondition(conditions []conditionsv1.Condition, conditionType conditionsv1.ConditionType) *conditionsv1.Condition { for i, condition := range conditions { if condition.Type == conditionType { return &conditions[i] diff --git a/vendor/github.com/openshift/custom-resource-status/LICENSE b/vendor/github.com/openshift/custom-resource-status/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go new file mode 100644 index 00000000000..bbeee804a2b --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go @@ -0,0 +1,104 @@ +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. 
+func SetStatusCondition(conditions *[]Condition, newCondition Condition) {
+	if conditions == nil {
+		conditions = &[]Condition{}
+	}
+	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		newCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+	existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
+}
+
+// SetStatusConditionNoHeartbeat sets the corresponding condition in conditions to newCondition
+// without setting lastHeartbeatTime.
+func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) {
+	if conditions == nil {
+		conditions = &[]Condition{}
+	}
+	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+// RemoveStatusCondition removes the corresponding conditionType from conditions.
+func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) {
+	if conditions == nil {
+		return
+	}
+	newConditions := []Condition{}
+	for _, condition := range *conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+
+	*conditions = newConditions
+}
+
+// FindStatusCondition finds the conditionType in conditions.
+func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+// IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue`
+func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue)
+}
+
+// IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse`
+func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionFalse)
+}
+
+// IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown`
+func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionUnknown)
+}
+
+// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status.
+func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go new file mode 100644 index 00000000000..b657efeaa65 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1 provides version v1 of the types and functions necessary to +// manage and inspect a slice of conditions. It is opinionated in the +// condition types provided but leaves it to the user to define additional +// types as necessary. +package v1 diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go new file mode 100644 index 00000000000..950678fb970 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go @@ -0,0 +1,51 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Condition represents the state of the operator's +// reconciliation functionality. +// +k8s:deepcopy-gen=true +type Condition struct { + Type ConditionType `json:"type" description:"type of condition ie. Available|Progressing|Degraded."` + + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` + + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"` + + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"` +} + +// ConditionType is the state of the operator's reconciliation functionality. +type ConditionType string + +const ( + // ConditionAvailable indicates that the resources maintained by the operator, + // is functional and available in the cluster. + ConditionAvailable ConditionType = "Available" + + // ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the + // operator + ConditionProgressing ConditionType = "Progressing" + + // ConditionDegraded indicates that the resources maintained by the operator are not functioning completely. + // An example of a degraded state would be if not all pods in a deployment were running. + // It may still be available, but it is degraded + ConditionDegraded ConditionType = "Degraded" + + // ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade. + // When `False`, the resources maintained by the operator should not be upgraded and the + // message field should contain a human readable description of what the administrator should do to + // allow the operator to successfully update the resources maintained by the operator. 
+ ConditionUpgradeable ConditionType = "Upgradeable" +) diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..bbbbf863d13 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go @@ -0,0 +1,23 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go index 86917dbf0a8..af4942841e3 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -1,6 +1,7 @@ package v1 import ( + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -15,11 +16,11 @@ const ( // worked properly on the last applied cluster deployment LastApplySucceeded LastApplyStatusType = "Succeeded" // LastApplyBrokenSyntax indicates that Hive failed to apply - // customization patches on install-config. More detailes would be found in + // customization patches on install-config. More details would be found in // Valid condition message. LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax" - // LastApplyBrokenCloud indicates that cluser deployment provision has failed - // when used this customization. More detailes would be found in the Valid condition message. + // LastApplyBrokenCloud indicates that cluster deployment provision has failed + // when used this customization. More details would be found in the Valid condition message. LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud" // LastApplyInstallationPending indicates that the customization patches have // been successfully applied but provisioning is not completed yet. @@ -75,44 +76,24 @@ type ClusterDeploymentCustomizationStatus struct { // +optional LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"` - // Conditions includes more detailed status for the cluster deployment customization status. + // Conditions describes the state of the operator's reconciliation functionality. + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"` + // Conditions is a list of conditions related to operator reconciliation + Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } -type ClusterDeploymentCustomizationCondition struct { - // Type is the type of the condition. 
- Type ClusterDeploymentCustomizationConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type -type ClusterDeploymentCustomizationConditionType string - const ( - ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available" - ClusterDeploymentCustomizationValid ClusterDeploymentCustomizationConditionType = "Valid" + ClusterDeploymentCustomizationValid conditionsv1.ConditionType = "Valid" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterDeploymentCustomizationLis contains the list of ClusterDeploymentCustomization +// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations type ClusterDeploymentCustomizationList struct { metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` + metav1.ListMeta `json:"metadata,omitempty"` Items []ClusterDeploymentCustomization `json:"items"` } diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index 34e04dd115c..a41584ab13e 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -7,6 +7,7 @@ package v1 import ( configv1 "github.com/openshift/api/config/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" agent "github.com/openshift/hive/apis/hive/v1/agent" alibabacloud "github.com/openshift/hive/apis/hive/v1/alibabacloud" aws "github.com/openshift/hive/apis/hive/v1/aws" @@ -704,24 +705,6 @@ func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. -func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { - if in == nil { - return nil - } - out := new(ClusterDeploymentCustomizationCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { *out = *in @@ -784,9 +767,10 @@ func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploym *out = new(corev1.LocalObjectReference) **out = **in } + in.LastApplyTime.DeepCopyInto(&out.LastApplyTime) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + *out = make([]conditionsv1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 58a75f8bc84..70757254c65 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -917,6 +917,9 @@ github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1 ## explicit; go 1.16 github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1 github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1beta1 +# github.com/openshift/custom-resource-status v1.1.2 +## explicit; go 1.12 +github.com/openshift/custom-resource-status/conditions/v1 # github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 ## explicit; go 1.13 github.com/openshift/generic-admission-server/pkg/apiserver From 90380da8029bf512a98ff5cc22dc972778c01d85 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Thu, 28 Apr 2022 17:28:31 +0300 Subject: [PATCH 04/27] More fixes --- apis/hive/v1/clusterdeployment_types.go | 2 +- .../clusterdeploymentcustomization_types.go | 26 ++- apis/hive/v1/clusterpool_types.go | 20 +- ...ft.io_clusterdeploymentcustomizations.yaml | 17 +- .../hive.openshift.io_clusterdeployments.yaml | 4 +- .../crds/hive.openshift.io_clusterpools.yaml | 4 +- hack/app-sre/saas-template.yaml | 23 +-- .../hive/v1/clusterdeploymentcustomization.go | 17 ++ .../fake_clusterdeploymentcustomization.go | 12 ++ .../clusterdeployment_controller.go | 54 +++-- .../clusterpool/clusterpool_controller.go | 190 +++++++++--------- pkg/controller/utils/conditions.go | 10 - .../apis/hive/v1/clusterdeployment_types.go | 2 +- .../clusterdeploymentcustomization_types.go | 26 ++- .../hive/apis/hive/v1/clusterpool_types.go | 6 +- 15 files changed, 212 insertions(+), 201 deletions(-) diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go index e842e7d639a..e83976cf855 100644 --- a/apis/hive/v1/clusterdeployment_types.go +++ b/apis/hive/v1/clusterdeployment_types.go @@ -280,7 +280,7 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` - // CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment + // CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment. // The Customization exists in the ClusterPool namespace. // +optional CustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"` diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go index af4942841e3..baf95efc1a8 100644 --- a/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -7,20 +7,20 @@ import ( ) // LastApplyStatusType indicates the status of the customization on the last -// applied cluster deployment. 
This is needed to for inventory sorting process to
-// avoid using same broken customization
+// applied cluster deployment. This is used by the inventory sorting process to
+// reduce the likelihood of using a broken customization repeatedly.
 type LastApplyStatusType string
 
 const (
 	// LastApplySucceeded indicates that the customization
-	// worked properly on the last applied cluster deployment
+	// worked properly on the last applied cluster deployment.
 	LastApplySucceeded LastApplyStatusType = "Succeeded"
 	// LastApplyBrokenSyntax indicates that Hive failed to apply
 	// customization patches on install-config. More details would be found in
 	// Valid condition message.
 	LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax"
 	// LastApplyBrokenCloud indicates that cluster deployment provision has failed
-	// when used this customization. More details would be found in the Valid condition message.
+	// when using this customization. More details can be found in the Valid condition message.
 	LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud"
 	// LastApplyInstallationPending indicates that the customization patches have
 	// been successfully applied but provisioning is not completed yet.
@@ -28,10 +28,9 @@ const (
 )
 
 // +genclient
-// +genclient:noStatus
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API
+// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API.
 // +kubebuilder:subresource:status
 // +k8s:openapi-gen=true
 // +kubebuilder:resource:scope=Namespaced
@@ -43,9 +42,9 @@ type ClusterDeploymentCustomization struct {
 	Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"`
 }
 
-// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization
+// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization.
 type ClusterDeploymentCustomizationSpec struct {
-	// InstallConfigPatches is a list of patches to be applied to the install-config
+	// InstallConfigPatches is a list of patches to be applied to the install-config.
 	InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"`
 }
 
@@ -62,17 +61,17 @@ type PatchEntity struct {
 	Value string `json:"value"`
 }
 
-// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization
+// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization.
 type ClusterDeploymentCustomizationStatus struct {
-	// ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on
+	// ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on.
 	// +optional
 	ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"`
 
-	// LastApplyTime indicates the time when the customization was applied on a cluster deployment
+	// LastApplyTime indicates the time when the customization was applied on a cluster deployment.
 	// +optional
 	LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"`
 
-	// LastApplyStatus indicates the customization status in the last applied cluster deployment
+	// LastApplyStatus indicates the customization status in the last applied cluster deployment.
 	// +optional
 	LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"`
 
@@ -80,7 +79,6 @@ type ClusterDeploymentCustomizationStatus struct {
 	// +patchMergeKey=type
 	// +patchStrategy=merge
 	// +optional
-	// Conditions is a list of conditions related to operator reconciliation
 	Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
 }
 
@@ -90,7 +88,7 @@ const (
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations
+// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations.
 type ClusterDeploymentCustomizationList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata,omitempty"`
 	Items           []ClusterDeploymentCustomization `json:"items"`
 }
diff --git a/apis/hive/v1/clusterpool_types.go b/apis/hive/v1/clusterpool_types.go
index c8673f9cc87..37efd25a558 100644
--- a/apis/hive/v1/clusterpool_types.go
+++ b/apis/hive/v1/clusterpool_types.go
@@ -94,7 +94,7 @@ type ClusterPoolSpec struct {
 	HibernationConfig *HibernationConfig `json:"hibernationConfig"`
 
 	// Inventory maintains a list of entries consumed by the ClusterPool
-	// to customize the default the ClusterDeployment
+	// to customize the default ClusterDeployment.
 	// +optional
 	Inventory []InventoryEntry `json:"inventory,omitempty"`
 }
@@ -115,13 +115,13 @@ type HibernationConfig struct {
 	ResumeTimeout metav1.Duration `json:"resumeTimeout"`
 }
 
-// InventoryEntryKind in Kind of the inventory entry
+// InventoryEntryKind is the Kind of the inventory entry.
 // +kubebuilder:validation:Enum="";ClusterDeploymentCustomization
 type InventoryEntryKind string
 
 const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization"
 
-// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment
+// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment.
 type InventoryEntry struct {
 	// Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value.
 	// +kubebuilder:default=ClusterDeploymentCustomization
@@ -222,20 +222,12 @@ const (
 	ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid"
 )
 
-// Inventory (in)valid reasons
 const (
 	// InventoryReasonValid is used when all ClusterDeploymentCustomization are
-	// available and when used the ClusterDeployments are successfully installed
+	// available and, when used, the ClusterDeployments are successfully installed.
 	InventoryReasonValid = "Valid"
-	// InventoryReasonMissing is used when one or more ClusterDeploymentCustomization are missing
-	InventoryReasonMissing = "Missing"
-	// InventoryReasonFound is used cancel a missing ClusterDeploymentCustomization
-	InventoryReasonFound = "Found"
-	// InventoryReasonBrokenByCloud is used when one or more ClusterDeployments installations failed
-	InventoryReasonBrokenByCloud = "BrokenByCloud"
-	// InvenotryReasonBrokenBySyntax is used when one or more ClusterDeploymentCustomization patching failed
-	InvenotryReasonBrokenBySyntax = "BrokenBySyntax"
-	// InventoryReasonInvalid is used when multiple reasons and ClusterDeploymentCustomizations are incompatible
+	// InventoryReasonInvalid is used when there is something wrong with a ClusterDeploymentCustomization, for example
+	// a patching issue, a provisioning failure, a missing resource, etc.
InventoryReasonInvalid = "Invalid" ) diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml index cadfca3bc45..b6d486855a8 100644 --- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -18,7 +18,7 @@ spec: schema: openAPIV3Schema: description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations - API + API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -34,11 +34,11 @@ spec: type: object spec: description: ClusterDeploymentCustomizationSpec defines the desired state - of ClusterDeploymentCustomization + of ClusterDeploymentCustomization. properties: installConfigPatches: description: InstallConfigPatches is a list of patches to be applied - to the install-config + to the install-config. items: description: PatchEntity represent a json patch (RFC 6902) to be applied to the install-config @@ -62,11 +62,11 @@ spec: type: object status: description: ClusterDeploymentCustomizationStatus defines the observed - state of ClusterDeploymentCustomization + state of ClusterDeploymentCustomization. properties: clusterDeploymentRef: description: ClusterDeploymentRef is a reference to the cluster deployment - that this customization is applied on + that this customization is applied on. properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -75,8 +75,7 @@ spec: type: object conditions: description: Conditions describes the state of the operator's reconciliation - functionality. Conditions is a list of conditions related to operator - reconciliation + functionality. items: description: Condition represents the state of the operator's reconciliation functionality. @@ -104,11 +103,11 @@ spec: type: array lastApplyStatus: description: LastApplyStatus indicates the customization status in - the last applied cluster deployment + the last applied cluster deployment. type: string lastApplyTime: description: LastApplyTime indicates the time when the customization - was applied on a cluster deployment + was applied on a cluster deployment. format: date-time type: string type: object diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index 9f52d6daf75..26ddaf87f10 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -189,8 +189,8 @@ spec: type: string clusterDeploymentCustomization: description: CustomizationRef is the ClusterPool Inventory claimed - customization for this ClusterDeployment The Customization exists - in the ClusterPool namespace. + customization for this ClusterDeployment. The Customization + exists in the ClusterPool namespace. properties: name: description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml index 27ea0aa59ca..157e29b7e2f 100644 --- a/config/crds/hive.openshift.io_clusterpools.yaml +++ b/config/crds/hive.openshift.io_clusterpools.yaml @@ -154,10 +154,10 @@ spec: type: object inventory: description: Inventory maintains a list of entries consumed by the - ClusterPool to customize the default the ClusterDeployment + ClusterPool to customize the default ClusterDeployment. items: description: InventoryEntry maintains a reference to a custom resource - consumed by a clusterpool to customize the cluster deployment + consumed by a clusterpool to customize the cluster deployment. properties: kind: default: ClusterDeploymentCustomization diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index feb41a5ebdf..d5d9946239a 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -271,7 +271,7 @@ objects: schema: openAPIV3Schema: description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations - API + API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -288,11 +288,11 @@ objects: type: object spec: description: ClusterDeploymentCustomizationSpec defines the desired - state of ClusterDeploymentCustomization + state of ClusterDeploymentCustomization. properties: installConfigPatches: description: InstallConfigPatches is a list of patches to be applied - to the install-config + to the install-config. items: description: PatchEntity represent a json patch (RFC 6902) to be applied to the install-config @@ -316,11 +316,11 @@ objects: type: object status: description: ClusterDeploymentCustomizationStatus defines the observed - state of ClusterDeploymentCustomization + state of ClusterDeploymentCustomization. properties: clusterDeploymentRef: description: ClusterDeploymentRef is a reference to the cluster - deployment that this customization is applied on + deployment that this customization is applied on. properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -329,8 +329,7 @@ objects: type: object conditions: description: Conditions describes the state of the operator's reconciliation - functionality. Conditions is a list of conditions related to operator - reconciliation + functionality. items: description: Condition represents the state of the operator's reconciliation functionality. @@ -358,11 +357,11 @@ objects: type: array lastApplyStatus: description: LastApplyStatus indicates the customization status - in the last applied cluster deployment + in the last applied cluster deployment. type: string lastApplyTime: description: LastApplyTime indicates the time when the customization - was applied on a cluster deployment + was applied on a cluster deployment. format: date-time type: string type: object @@ -572,7 +571,7 @@ objects: type: string clusterDeploymentCustomization: description: CustomizationRef is the ClusterPool Inventory claimed - customization for this ClusterDeployment The Customization + customization for this ClusterDeployment. The Customization exists in the ClusterPool namespace. 
properties: name: @@ -2107,11 +2106,11 @@ objects: type: object inventory: description: Inventory maintains a list of entries consumed by the - ClusterPool to customize the default the ClusterDeployment + ClusterPool to customize the default ClusterDeployment. items: description: InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster - deployment + deployment. properties: kind: default: ClusterDeploymentCustomization diff --git a/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go index dd7ce990a81..8191f80c1c9 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go @@ -24,6 +24,7 @@ type ClusterDeploymentCustomizationsGetter interface { type ClusterDeploymentCustomizationInterface interface { Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (*v1.ClusterDeploymentCustomization, error) Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (*v1.ClusterDeploymentCustomization, error) + UpdateStatus(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (*v1.ClusterDeploymentCustomization, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterDeploymentCustomization, error) @@ -119,6 +120,22 @@ func (c *clusterDeploymentCustomizations) Update(ctx context.Context, clusterDep return } +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *clusterDeploymentCustomizations) UpdateStatus(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(clusterDeploymentCustomization.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + // Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. func (c *clusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go index e775114dae9..72969122200 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go @@ -86,6 +86,18 @@ func (c *FakeClusterDeploymentCustomizations) Update(ctx context.Context, cluste return obj.(*hivev1.ClusterDeploymentCustomization), err } +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterDeploymentCustomizations) UpdateStatus(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.UpdateOptions) (*hivev1.ClusterDeploymentCustomization, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(clusterdeploymentcustomizationsResource, "status", c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + // Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. func (c *FakeClusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 4e25ff12e80..d3cef382567 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -564,7 +564,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) { cdLog.Debugf("adding clusterdeployment deprovision finalizer") - if err := r.addClusterDeploymentFinalizer(cd); err != nil { + if err := r.addClusterDeploymentFinalizer(cd, hivev1.FinalizerDeprovision); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding deprovision finalizer") return reconcile.Result{}, err } @@ -572,6 +572,16 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi return reconcile.Result{}, nil } + if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { + cdLog.Debugf("adding clusterdeployment customization release finalizer") + if err := r.addClusterDeploymentFinalizer(cd, hivev1.FinalizerCustomizationRelease); err != nil { + cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding customization finalizer") + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil + } + if cd.Spec.ManageDNS { updated, result, err := r.ensureManagedDNSZone(cd, cdLog) if updated || err != nil { @@ -1397,7 +1407,7 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu case !dnsZoneGone: return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil default: - cdLog.Infof("DNSZone gone, customization gone and deprovision request completed, removing finalizer") + cdLog.Infof("DNSZone gone, customization gone and deprovision request completed, removing deprovision finalizer") if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer") return reconcile.Result{}, err @@ -1406,12 +1416,9 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu } } -func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error { +func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment, finalizer string) error { cd = cd.DeepCopy() - controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision) - if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef 
!= nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { - controllerutils.AddFinalizer(cd, hivev1.FinalizerCustomizationRelease) - } + controllerutils.AddFinalizer(cd, finalizer) return r.Update(context.TODO(), cd) } @@ -1435,41 +1442,48 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1 } func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { - if cd.Spec.ClusterPoolRef == nil || cd.Spec.ClusterPoolRef.CustomizationRef == nil { + if cd.Spec.ClusterPoolRef == nil || cd.Spec.ClusterPoolRef.CustomizationRef == nil || !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { return nil } customizationRef := cd.Spec.ClusterPoolRef.CustomizationRef cdc := &hivev1.ClusterDeploymentCustomization{} - err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: cd.Spec.ClusterPoolRef.Namespace, Name: customizationRef.Name}, cdc) + cdcNamespace := cd.Spec.ClusterPoolRef.Namespace + cdcName := customizationRef.Name + cdcLog := cdLog.WithField("customization", cdcName).WithField("namespace", cdcNamespace) + err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: cdcNamespace, Name: cdcName}, cdc) if err != nil { if apierrors.IsNotFound(err) { - cdLog.WithField("Customization", customizationRef.Name).Info("customization not found, nothing to release") + cdcLog.Info("customization not found, nothing to release") controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) if err := r.Update(context.TODO(), cd); err != nil { - cdLog.WithError(err).WithField("Customization", customizationRef.Name).Error("failed to update ClusterDeployment") + cdcLog.WithError(err).Error("failed to update ClusterDeployment") + return err } - return nil + return err } log.WithError(err).Error("error reading customization") return err } - conditionsv1.SetStatusCondition(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "ClusterDeploymentCustomizationAvailable", - Message: "Cluster Deployment Customization is available", - }) + existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if existingCondition.Reason != "Available" || existingCondition.Message != "available" { + conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Available", + Message: "available", + }) + } cdc.Status.ClusterDeploymentRef = nil if err := r.Status().Update(context.Background(), cdc); err != nil { - cdLog.WithError(err).WithField("Customization", customizationRef.Name).Error("failed to update ClusterDeploymentCustomizationAvailable condition") + cdcLog.WithError(err).Error("failed to update ClusterDeploymentCustomizationAvailable condition") return err } controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) if err := r.Update(context.TODO(), cd); err != nil { - cdLog.WithError(err).WithField("Customization", customizationRef.Name).Error("Failed to update ClusterDeployment after ClusterDeploymentCustomization finalizer deletion") + cdcLog.WithError(err).Error("Failed to update ClusterDeployment after ClusterDeploymentCustomization finalizer deletion") return err } diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index a98b01fe6c4..40741a90bba 
100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -8,13 +8,12 @@ import ( "reflect" "sort" - "github.com/ghodss/yaml" + yamlpatch "github.com/krishicks/yaml-patch" "github.com/pkg/errors" log "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "github.com/davegardnerisme/deephash" - jsonpatch "github.com/evanphx/json-patch" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -296,7 +295,7 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, err } r.updateInventory(clp, cds.Unassigned(false), hivev1.LastApplySucceeded, logger) - r.updateInventory(clp, cds.Installing(), hivev1.LastApplySucceeded, logger) + r.updateInventory(clp, cds.Installing(), hivev1.LastApplyInstallationPending, logger) claims, err := getAllClaimsForPool(r.Client, clp, logger) if err != nil { @@ -435,9 +434,9 @@ func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*h reason := hivev1.InventoryReasonValid switch status { case hivev1.LastApplyBrokenCloud: - reason = hivev1.InventoryReasonBrokenByCloud + reason = string(hivev1.LastApplyBrokenCloud) case hivev1.LastApplyBrokenSyntax: - reason = hivev1.InvenotryReasonBrokenBySyntax + reason = string(hivev1.LastApplyBrokenSyntax) } // Helper functions to get and update ClusterDeploymentCustomization, and update inventory message if missing @@ -445,8 +444,7 @@ func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*h cdc := &hivev1.ClusterDeploymentCustomization{} if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: name}, cdc); err != nil { if apierrors.IsNotFound(err) { - reason := hivev1.InventoryReasonMissing - r.updateInventoryValidMessage(clp, cdc.Name, reason, false, logger) + r.updateInventoryValidMessage(clp, cdc.Name, "missing", false, logger) } log.WithError(err).Warn("error reading customization") return nil @@ -460,9 +458,22 @@ func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*h } } + contains := func(s []string, str string) bool { + for _, v := range s { + if v == str { + return true + } + } + + return false + } + // First update the inventory and ClusterDeploymentCustomizations related to changed ClusterDeployments var active_cdc []string for _, cd := range cds { + if cd.Spec.ClusterPoolRef.CustomizationRef == nil { + continue + } active_cdc = append(active_cdc, cd.Spec.ClusterPoolRef.CustomizationRef.Name) if cdc := getCDC(cd.Spec.ClusterPoolRef.CustomizationRef.Name); cdc != nil { r.updateInventoryValidMessage(clp, cdc.Name, reason, false, logger) @@ -481,41 +492,40 @@ func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*h // Next update the rest of the inventory and ClusterDeploymentCustomizations for _, item := range clp.Spec.Inventory { - pos := sort.SearchStrings(active_cdc, item.Name) - if !(pos < len(active_cdc) && active_cdc[pos] == item.Name) { + if !contains(active_cdc, item.Name) { continue } - if cdc := getCDC(item.Name); cdc != nil { - // This will fix the message if CDC was missing before - r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonFound, false, logger) - // The following part will try to fix the following scenarios: - // 1. CDC condition is available but it has reference to existing ClusterDeployment - // 2. 
CDC is reserved but it doesn't have a reference to a ClusterDeployment - currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( - cdc.Status.Conditions, - conditionsv1.ConditionAvailable, - ) - if cdc.Status.ClusterDeploymentRef != nil { - cd := &hivev1.ClusterDeployment{} - ref := client.ObjectKey{Namespace: cdc.Status.ClusterDeploymentRef.Name, Name: cdc.Status.ClusterDeploymentRef.Name} - if err := r.Client.Get(context.Background(), ref, cd); err != nil { - if apierrors.IsNotFound(err) { - cdc.Status.ClusterDeploymentRef = nil - } + + cdc := getCDC(item.Name) + if cdc == nil { + continue + } + // This will fix the message if CDC was missing before + r.updateInventoryValidMessage(clp, cdc.Name, "found", false, logger) + // The following part will try to fix the following scenarios: + // 1. CDC condition is available but it has reference to existing ClusterDeployment + // 2. CDC is reserved but it doesn't have a reference to a ClusterDeployment + currentAvailability := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if cdc.Status.ClusterDeploymentRef != nil { + cd := &hivev1.ClusterDeployment{} + ref := client.ObjectKey{Namespace: cdc.Status.ClusterDeploymentRef.Name, Name: cdc.Status.ClusterDeploymentRef.Name} + if err := r.Client.Get(context.Background(), ref, cd); err != nil { + if apierrors.IsNotFound(err) { + cdc.Status.ClusterDeploymentRef = nil } } - availableWithCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionTrue) && cdc.Status.ClusterDeploymentRef != nil - reservedWithoutCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionFalse) && cdc.Status.ClusterDeploymentRef == nil - if availableWithCD || reservedWithoutCD { - status := true - msg := "Available" - if availableWithCD { - status = false - msg = "Fixed reservation" - } - - updateCDC(cdc, status, msg) + } + availableWithCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionTrue) && cdc.Status.ClusterDeploymentRef != nil + reservedWithoutCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionFalse) && cdc.Status.ClusterDeploymentRef == nil + if availableWithCD || reservedWithoutCD { + status := true + msg := "available" + if availableWithCD { + status = false + msg = "fixed reservation" } + + updateCDC(cdc, status, msg) } } @@ -531,16 +541,19 @@ func (r *ReconcileClusterPool) setCustomizationAvailabilityCondition(cdc *hivev1 reason = "Reserved" } - conditionsv1.SetStatusCondition(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: status, - Reason: reason, - Message: message, - }) + existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if existingCondition.Reason != reason || existingCondition.Message != message { + conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: status, + Reason: reason, + Message: message, + }) - if err := r.Status().Update(context.TODO(), cdc); err != nil { - logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") - return errors.Wrap(err, "could not update ClusterDeploymentCustomization conditions") + if err := r.Status().Update(context.TODO(), cdc); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update 
ClusterDeploymentCustomization conditions") + return errors.Wrap(err, "could not update ClusterDeploymentCustomization conditions") + } } return nil @@ -886,18 +899,28 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h cdc := &hivev1.ClusterDeploymentCustomization{} if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: cd.Spec.ClusterPoolRef.CustomizationRef.Name}, cdc); err != nil { if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonMissing, true, logger) + r.updateInventoryValidMessage(clp, cdc.Name, "missing", true, logger) } return err } - installConfig, err := applyPatches(cdc.Spec.InstallConfigPatches, secret.StringData["install-config.yaml"], logger) + newPatch := yamlpatch.Patch{} + for _, patch := range cdc.Spec.InstallConfigPatches { + var value interface{} + value = patch.Value + newPatch = append(newPatch, yamlpatch.Operation{ + Op: yamlpatch.Op(patch.Op), + Path: yamlpatch.OpPath(patch.Path), + Value: yamlpatch.NewNode(&value), + }) + } + installConfig, err := newPatch.Apply(secret.Data["install-config.yaml"]) if err != nil { - r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InvenotryReasonBrokenBySyntax, true, logger) + r.updateInventoryValidMessage(clp, cdc.Name, string(hivev1.LastApplyBrokenSyntax), true, logger) cdc.Status.LastApplyStatus = hivev1.LastApplyBrokenSyntax if updateErr := r.Status().Update(context.Background(), cdc); updateErr != nil { if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonMissing, true, logger) + r.updateInventoryValidMessage(clp, cdc.Name, "missing", true, logger) } } @@ -905,22 +928,25 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h } // Reserving ClusterDeploymentCustomization - cdc.Status.LastApplyTime = metav1.Now() - conditionsv1.SetStatusCondition(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Reserved", - Message: "Reserved", - }) + existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if existingCondition.Reason != "Reserved" || existingCondition.Message != "reserved" { + cdc.Status.LastApplyTime = metav1.Now() + conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Reserved", + Message: "reserved", + }) - if err := r.Status().Update(context.Background(), cdc); err != nil { - if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(clp, cdc.Name, hivev1.InventoryReasonMissing, true, logger) + if err := r.Status().Update(context.Background(), cdc); err != nil { + if apierrors.IsNotFound(err) { + r.updateInventoryValidMessage(clp, cdc.Name, "missing", true, logger) + } + return err } - return err } - secret.StringData["install-config.yaml"] = installConfig + secret.Data["install-config.yaml"] = installConfig return nil } @@ -1132,7 +1158,7 @@ func (r *ReconcileClusterPool) updateInventoryValidMessage(pool *hivev1.ClusterP // Replace the entry for this CDC -- but omit if it's valid if cdcName != "" && cdcState != "" { delete(curMap, cdcName) - if cdcState != hivev1.InventoryReasonValid && cdcState != hivev1.InventoryReasonFound { + if cdcState != hivev1.InventoryReasonValid || cdcState != "found" { curMap[cdcName] = string(cdcState) } } @@ -1307,14 +1333,11 @@ func 
(r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPoo cdc := &hivev1.ClusterDeploymentCustomization{} if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: pool.Namespace, Name: entry.Name}, cdc); err != nil { if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(pool, cdc.Name, hivev1.InventoryReasonMissing, true, logger) + r.updateInventoryValidMessage(pool, cdc.Name, "missing", true, logger) } return err } - currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( - cdc.Status.Conditions, - conditionsv1.ConditionAvailable, - ) + currentAvailability := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) if currentAvailability == nil || currentAvailability.Status == corev1.ConditionTrue { inventory = append(inventory, *cdc) } @@ -1340,37 +1363,6 @@ func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPoo return errors.New("no customization available") } -// applyPatches is for applying JSON patches (RFC 6902) on install-config (YAML format) -func applyPatches(patches []hivev1.PatchEntity, data string, logger log.FieldLogger) (string, error) { - targetJson, err := yaml.YAMLToJSON([]byte(data)) - if err != nil { - log.WithError(err).Error("unable to parse install-config template") - return data, err - } - - patchJson, err := json.Marshal(patches) - if err != nil { - log.WithError(err).Error("unable to marshal patches to json") - return data, err - } - - patch, err := jsonpatch.DecodePatch(patchJson) - if err != nil { - log.WithError(err).Error("unable to create json patch") - return data, err - } - - patchedJson, err := patch.Apply(targetJson) - if err != nil { - log.WithError(err).Error("unable to patch install-config template") - return data, err - } - - patchedYaml, _ := yaml.JSONToYAML(patchedJson) - - return string(patchedYaml), nil -} - func isInstallConfigSecret(obj interface{}) *corev1.Secret { if secret, ok := obj.(*corev1.Secret); ok { _, ok := secret.StringData["install-config.yaml"] diff --git a/pkg/controller/utils/conditions.go b/pkg/controller/utils/conditions.go index 8b9943c2c72..7d49b4ebb09 100644 --- a/pkg/controller/utils/conditions.go +++ b/pkg/controller/utils/conditions.go @@ -6,7 +6,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" ) @@ -543,15 +542,6 @@ func SetMachinePoolCondition( return newConditions } -func FindClusterDeploymentCustomizationCondition(conditions []conditionsv1.Condition, conditionType conditionsv1.ConditionType) *conditionsv1.Condition { - for i, condition := range conditions { - if condition.Type == conditionType { - return &conditions[i] - } - } - return nil -} - // SetMachinePoolConditionWithChangeCheck sets a condition on a MachinePool resource's status. // It returns the conditions as well a boolean indicating whether there was a change made // to the conditions. diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go index e842e7d639a..e83976cf855 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go @@ -280,7 +280,7 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. 
This is only used for
 // ClusterDeployments belonging to ClusterPools.
 	ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"`
-	// CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment
+	// CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment.
 	// The Customization exists in the ClusterPool namespace.
 	// +optional
 	CustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"`
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
index af4942841e3..baf95efc1a8 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -7,20 +7,20 @@ import (
 )
 
 // LastApplyStatusType indicates the status of the customization on the last
-// applied cluster deployment. This is needed to for inventory sorting process to
-// avoid using same broken customization
+// applied cluster deployment. This is used by the inventory sorting process to
+// reduce the likelihood of using a broken customization repeatedly.
 type LastApplyStatusType string
 
 const (
 	// LastApplySucceeded indicates that the customization
-	// worked properly on the last applied cluster deployment
+	// worked properly on the last applied cluster deployment.
 	LastApplySucceeded LastApplyStatusType = "Succeeded"
 	// LastApplyBrokenSyntax indicates that Hive failed to apply
 	// customization patches on install-config. More details would be found in
 	// Valid condition message.
 	LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax"
 	// LastApplyBrokenCloud indicates that cluster deployment provision has failed
-	// when used this customization. More details would be found in the Valid condition message.
+	// when using this customization. More details can be found in the Valid condition message.
 	LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud"
 	// LastApplyInstallationPending indicates that the customization patches have
 	// been successfully applied but provisioning is not completed yet.
@@ -28,10 +28,9 @@ const (
 )
 
 // +genclient
-// +genclient:noStatus
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API
+// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API.
 // +kubebuilder:subresource:status
 // +k8s:openapi-gen=true
 // +kubebuilder:resource:scope=Namespaced
@@ -43,9 +42,9 @@ type ClusterDeploymentCustomization struct {
 	Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"`
 }
 
-// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization
+// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization.
 type ClusterDeploymentCustomizationSpec struct {
-	// InstallConfigPatches is a list of patches to be applied to the install-config
+	// InstallConfigPatches is a list of patches to be applied to the install-config.
InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` } @@ -62,17 +61,17 @@ type PatchEntity struct { Value string `json:"value"` } -// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization. type ClusterDeploymentCustomizationStatus struct { - // ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on + // ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on. // +optional ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"` - // LastApplyTime indicates the time when the customization was applied on a cluster deployment + // LastApplyTime indicates the time when the customization was applied on a cluster deployment. // +optional LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"` - // LastApplyStatus indicates the customization status in the last applied cluster deployment + // LastApplyStatus indicates the customization status in the last applied cluster deployment. // +optional LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"` @@ -80,7 +79,6 @@ type ClusterDeploymentCustomizationStatus struct { // +patchMergeKey=type // +patchStrategy=merge // +optional - // Conditions is a list of conditions related to operator reconciliation Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } @@ -90,7 +88,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations +// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations. type ClusterDeploymentCustomizationList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go index c8673f9cc87..f7ea0ffc9b4 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go @@ -94,7 +94,7 @@ type ClusterPoolSpec struct { HibernationConfig *HibernationConfig `json:"hibernationConfig"` // Inventory maintains a list of entries consumed by the ClusterPool - // to customize the default the ClusterDeployment + // to customize the default ClusterDeployment. // +optional Inventory []InventoryEntry `json:"inventory,omitempty"` } @@ -115,13 +115,13 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } -// InventoryEntryKind in Kind of the inventory entry +// InventoryEntryKind is the Kind of the inventory entry. // +kubebuilder:validation:Enum="";ClusterDeploymentCustomization type InventoryEntryKind string const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization" -// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment +// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment. type InventoryEntry struct { // Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value. 
 	// +kubebuilder:default=ClusterDeploymentCustomization

From 68541715d2e1cb12f3e16abb17d415403a7c7a4e Mon Sep 17 00:00:00 2001
From: Alexander Braverman Masis
Date: Sun, 1 May 2022 21:52:33 +0300
Subject: [PATCH 05/27] Refactor to collections

---
 .../clusterdeploymentcustomization_types.go   |   4 +
 ...ft.io_clusterdeploymentcustomizations.yaml |   9 +
 hack/app-sre/saas-template.yaml               |   9 +
 .../clusterdeployment_controller.go           |  21 +-
 .../clusterpool/clusterpool_controller.go     | 289 +++---------------
 .../clusterpool_controller_test.go            |   2 +-
 pkg/controller/clusterpool/collections.go     | 243 +++++++++++++++
 .../clusterdeploymentcustomization.go         |  17 +-
 .../clusterdeploymentcustomization_types.go   |   4 +
 .../hive/apis/hive/v1/clusterpool_types.go    |  14 +-
 10 files changed, 346 insertions(+), 266 deletions(-)

diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go
index baf95efc1a8..059e10ba6c4 100644
--- a/apis/hive/v1/clusterdeploymentcustomization_types.go
+++ b/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -67,6 +67,10 @@ type ClusterDeploymentCustomizationStatus struct {
 	// +optional
 	ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"`
 
+	// ClusterPoolRef is the name of the current cluster pool the CDC is used at.
+	// +optional
+	ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"`
+
 	// LastApplyTime indicates the time when the customization was applied on a cluster deployment.
 	// +optional
 	LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"`
diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
index b6d486855a8..89b11855ea3 100644
--- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
+++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
@@ -73,6 +73,15 @@ spec:
                   TODO: Add other useful fields. apiVersion, kind, uid?'
                 type: string
             type: object
+          clusterPoolRef:
+            description: ClusterPoolRef is the name of the current cluster pool
+              the CDC is used at.
+            properties:
+              name:
+                description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                  TODO: Add other useful fields. apiVersion, kind, uid?'
+                type: string
+            type: object
           conditions:
             description: Conditions describes the state of the operator's reconciliation
               functionality.
diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml
index d5d9946239a..10b59cc1560 100644
--- a/hack/app-sre/saas-template.yaml
+++ b/hack/app-sre/saas-template.yaml
@@ -327,6 +327,15 @@ objects:
                     TODO: Add other useful fields. apiVersion, kind, uid?'
                   type: string
               type: object
+            clusterPoolRef:
+              description: ClusterPoolRef is the name of the current cluster pool
+                the CDC is used at.
+              properties:
+                name:
+                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    TODO: Add other useful fields. apiVersion, kind, uid?'
+                  type: string
+              type: object
             conditions:
               description: Conditions describes the state of the operator's reconciliation
                 functionality.
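For orientation, here is a rough sketch of how a populated ClusterDeploymentCustomization could look once the status fields above land. This is an illustration only: the names (vlan-1001, my-pool), the OpenStack machinesSubnet patch path, and all values are hypothetical and are not taken from these patches.

  apiVersion: hive.openshift.io/v1
  kind: ClusterDeploymentCustomization
  metadata:
    name: vlan-1001              # hypothetical inventory entry
    namespace: my-pool-namespace # CDCs live in the ClusterPool's namespace
  spec:
    installConfigPatches:        # JSON-patch (RFC 6902) operations applied to install-config.yaml
    - op: replace
      path: /platform/openstack/machinesSubnet
      value: "hypothetical-subnet-id"
  status:
    clusterPoolRef:              # new in this patch: the pool currently using this CDC
      name: my-pool
    clusterDeploymentRef:        # set while the CDC is reserved by a ClusterDeployment
      name: my-pool-abc12
    lastApplyStatus: Succeeded
    conditions:
    - type: Available
      status: "False"
      reason: Reserved
      message: reserved

A pool would pick this up through spec.inventory, e.g. an entry with kind: ClusterDeploymentCustomization and name: vlan-1001.
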
diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index d3cef382567..dde61468c64 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -563,7 +563,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi } if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) { - cdLog.Debugf("adding clusterdeployment deprovision finalizer") + cdLog.Debug("adding clusterdeployment deprovision finalizer") if err := r.addClusterDeploymentFinalizer(cd, hivev1.FinalizerDeprovision); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding deprovision finalizer") return reconcile.Result{}, err @@ -573,7 +573,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi } if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { - cdLog.Debugf("adding clusterdeployment customization release finalizer") + cdLog.Debug("adding clusterdeployment customization release finalizer") if err := r.addClusterDeploymentFinalizer(cd, hivev1.FinalizerCustomizationRelease); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding customization finalizer") return reconcile.Result{}, err @@ -1459,14 +1459,16 @@ func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDepl cdcLog.WithError(err).Error("failed to update ClusterDeployment") return err } - return err + return nil } log.WithError(err).Error("error reading customization") return err } + changed := false existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) if existingCondition.Reason != "Available" || existingCondition.Message != "available" { + changed = true conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionAvailable, Status: corev1.ConditionFalse, @@ -1475,10 +1477,15 @@ func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDepl }) } - cdc.Status.ClusterDeploymentRef = nil - if err := r.Status().Update(context.Background(), cdc); err != nil { - cdcLog.WithError(err).Error("failed to update ClusterDeploymentCustomizationAvailable condition") - return err + if cdc.Status.ClusterDeploymentRef != nil { + changed = true + cdc.Status.ClusterDeploymentRef = nil + } + if changed { + if err := r.Status().Update(context.Background(), cdc); err != nil { + cdcLog.WithError(err).Error("failed to update ClusterDeploymentCustomizationAvailable condition") + return err + } } controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 40741a90bba..2053bcc6299 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -2,7 +2,6 @@ package clusterpool import ( "context" - "encoding/json" "fmt" "math" "reflect" @@ -49,6 +48,7 @@ const ( icSecretDependent = "install config template secret" cdClusterPoolIndex = "spec.clusterpool.namespacedname" claimClusterPoolIndex = "spec.clusterpoolname" + cdcClusterPoolIndex = "status.clusterpoolname" defaultInventoryAttempts = 5 ) @@ -294,16 +294,21 @@ func (r 
*ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. if err != nil { return reconcile.Result{}, err } - r.updateInventory(clp, cds.Unassigned(false), hivev1.LastApplySucceeded, logger) - r.updateInventory(clp, cds.Installing(), hivev1.LastApplyInstallationPending, logger) - claims, err := getAllClaimsForPool(r.Client, clp, logger) if err != nil { return reconcile.Result{}, err } + cdcs, err := getAllCustomizationsForPool(r.Client, clp, logger) + if err != nil { + return reconcile.Result{}, err + } + claims.SyncClusterDeploymentAssignments(r.Client, cds, logger) cds.SyncClaimAssignments(r.Client, claims, logger) + if err := cdcs.SyncClusterDeploymentCustomizationAssignments(r.Client, clp, cds, logger); err != nil { + return reconcile.Result{}, err + } origStatus := clp.Status.DeepCopy() clp.Status.Size = int32(len(cds.Unassigned(true))) @@ -380,7 +385,6 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. // consume our maxConcurrent with additions than deletions. But we put it before the // "deleteExcessClusters" case because we would rather trim broken clusters than viable ones. case len(cds.Broken()) > 0: - r.updateInventory(clp, cds.Broken(), hivev1.LastApplyBrokenCloud, logger) if err := r.deleteBrokenClusters(cds, availableCurrent, logger); err != nil { return reconcile.Result{}, err } @@ -423,142 +427,6 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, nil } -// updateInventory ensures that the inventory of the cluster pool and the related ClusterDeploymentCustomizations are up to date. -// This includes validating resources existance and updating references when needed. -func (r *ReconcileClusterPool) updateInventory(clp *hivev1.ClusterPool, cds []*hivev1.ClusterDeployment, status hivev1.LastApplyStatusType, logger log.FieldLogger) { - // no need to update Inventory if it doesn't exist - if clp.Spec.Inventory == nil { - return - } - // InventoryValid condition and reason - reason := hivev1.InventoryReasonValid - switch status { - case hivev1.LastApplyBrokenCloud: - reason = string(hivev1.LastApplyBrokenCloud) - case hivev1.LastApplyBrokenSyntax: - reason = string(hivev1.LastApplyBrokenSyntax) - } - - // Helper functions to get and update ClusterDeploymentCustomization, and update inventory message if missing - getCDC := func(name string) *hivev1.ClusterDeploymentCustomization { - cdc := &hivev1.ClusterDeploymentCustomization{} - if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: name}, cdc); err != nil { - if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(clp, cdc.Name, "missing", false, logger) - } - log.WithError(err).Warn("error reading customization") - return nil - } - return cdc - } - - updateCDC := func(cdc *hivev1.ClusterDeploymentCustomization, status bool, msg string) { - if err := r.setCustomizationAvailabilityCondition(cdc, status, msg, logger); err != nil { - log.WithError(err).Warn("failed to update customization status") - } - } - - contains := func(s []string, str string) bool { - for _, v := range s { - if v == str { - return true - } - } - - return false - } - - // First update the inventory and ClusterDeploymentCustomizations related to changed ClusterDeployments - var active_cdc []string - for _, cd := range cds { - if cd.Spec.ClusterPoolRef.CustomizationRef == nil { - continue - } - active_cdc = append(active_cdc, cd.Spec.ClusterPoolRef.CustomizationRef.Name) - if cdc := 
getCDC(cd.Spec.ClusterPoolRef.CustomizationRef.Name); cdc != nil { - r.updateInventoryValidMessage(clp, cdc.Name, reason, false, logger) - // Fix missing ClusterDeployment Reference - if cdc.Status.ClusterDeploymentRef == nil { - cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cd.Name} - } - - cdc.Status.LastApplyStatus = status - - updateCDC(cdc, false, "reserved") - } - } - r.updateInventoryValidMessage(clp, "", "", true, logger) // just the inventory condition is being updated - sort.Strings(active_cdc) - - // Next update the rest of the inventory and ClusterDeploymentCustomizations - for _, item := range clp.Spec.Inventory { - if !contains(active_cdc, item.Name) { - continue - } - - cdc := getCDC(item.Name) - if cdc == nil { - continue - } - // This will fix the message if CDC was missing before - r.updateInventoryValidMessage(clp, cdc.Name, "found", false, logger) - // The following part will try to fix the following scenarios: - // 1. CDC condition is available but it has reference to existing ClusterDeployment - // 2. CDC is reserved but it doesn't have a reference to a ClusterDeployment - currentAvailability := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if cdc.Status.ClusterDeploymentRef != nil { - cd := &hivev1.ClusterDeployment{} - ref := client.ObjectKey{Namespace: cdc.Status.ClusterDeploymentRef.Name, Name: cdc.Status.ClusterDeploymentRef.Name} - if err := r.Client.Get(context.Background(), ref, cd); err != nil { - if apierrors.IsNotFound(err) { - cdc.Status.ClusterDeploymentRef = nil - } - } - } - availableWithCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionTrue) && cdc.Status.ClusterDeploymentRef != nil - reservedWithoutCD := (currentAvailability != nil && currentAvailability.Status == corev1.ConditionFalse) && cdc.Status.ClusterDeploymentRef == nil - if availableWithCD || reservedWithoutCD { - status := true - msg := "available" - if availableWithCD { - status = false - msg = "fixed reservation" - } - - updateCDC(cdc, status, msg) - } - - } - r.updateInventoryValidMessage(clp, "", "", true, logger) // just the inventory condition is being updated -} - -func (r *ReconcileClusterPool) setCustomizationAvailabilityCondition(cdc *hivev1.ClusterDeploymentCustomization, available bool, message string, logger log.FieldLogger) error { - status := corev1.ConditionTrue - reason := "Available" - - if !available { - status = corev1.ConditionFalse - reason = "Reserved" - } - - existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if existingCondition.Reason != reason || existingCondition.Message != message { - conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: status, - Reason: reason, - Message: message, - }) - - if err := r.Status().Update(context.TODO(), cdc); err != nil { - logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") - return errors.Wrap(err, "could not update ClusterDeploymentCustomization conditions") - } - } - - return nil -} - // reconcileRunningClusters ensures the oldest unassigned clusters are set to running, and the // remainder are set to hibernating. 
The number of clusters we set to running is determined by // adding the cluster's configured runningCount to the number of unsatisfied claims for which we're @@ -899,7 +767,7 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h cdc := &hivev1.ClusterDeploymentCustomization{} if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: cd.Spec.ClusterPoolRef.CustomizationRef.Name}, cdc); err != nil { if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(clp, cdc.Name, "missing", true, logger) + return errors.New("missing customization") } return err } @@ -914,13 +782,14 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h Value: yamlpatch.NewNode(&value), }) } - installConfig, err := newPatch.Apply(secret.Data["install-config.yaml"]) + installConfig, err := newPatch.Apply([]byte(secret.StringData["install-config.yaml"])) if err != nil { - r.updateInventoryValidMessage(clp, cdc.Name, string(hivev1.LastApplyBrokenSyntax), true, logger) + cdcs := cdcCollection{syntax: []*hivev1.ClusterDeploymentCustomization{cdc}} + cdcs.SyncClusterDeploymentCustomizationAssignments(r.Client, clp, &cdCollection{}, logger) cdc.Status.LastApplyStatus = hivev1.LastApplyBrokenSyntax if updateErr := r.Status().Update(context.Background(), cdc); updateErr != nil { if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(clp, cdc.Name, "missing", true, logger) + return errors.New("missing customization") } } @@ -930,6 +799,7 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h // Reserving ClusterDeploymentCustomization existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) if existingCondition.Reason != "Reserved" || existingCondition.Message != "reserved" { + cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: clp.Name} cdc.Status.LastApplyTime = metav1.Now() conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionAvailable, @@ -940,13 +810,13 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h if err := r.Status().Update(context.Background(), cdc); err != nil { if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(clp, cdc.Name, "missing", true, logger) + return errors.New("missing customization") } return err } } - secret.Data["install-config.yaml"] = installConfig + secret.StringData["install-config.yaml"] = string(installConfig) return nil } @@ -1145,73 +1015,6 @@ func (r *ReconcileClusterPool) setAvailableCapacityCondition(pool *hivev1.Cluste return nil } -// updateInventoryValidMessage maintaince cluster pool inventory status. If there is an issue with any of the ClusterDeploymentCustomizations then the inventory is invalid. -// When the inventory is invalid, the functions keeps track of the reason and related ClusterDeploymentCustomization name in the condition message. 
-func (r *ReconcileClusterPool) updateInventoryValidMessage(pool *hivev1.ClusterPool, cdcName string, cdcState string, update bool, logger log.FieldLogger) error { - currentCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) - // Decode the current message - curMap := map[string]string{} - if err := json.Unmarshal([]byte(currentCondition.Message), &curMap); err != nil { - log.WithField("message", currentCondition.Message).Warning("Could not decode current message, reset inventory status") - } - - // Replace the entry for this CDC -- but omit if it's valid - if cdcName != "" && cdcState != "" { - delete(curMap, cdcName) - if cdcState != hivev1.InventoryReasonValid || cdcState != "found" { - curMap[cdcName] = string(cdcState) - } - } - - // Default condition settings to "valid" - message := "" - reason := hivev1.InventoryReasonValid - - if len(curMap) != 0 { - for _, c := range curMap { - switch { - case reason == hivev1.InventoryReasonValid: - reason = c - case reason != c: - reason = hivev1.InventoryReasonInvalid - break - } - } - messageByte, err := json.Marshal(curMap) - if err != nil { - log.WithError(err).Error("Could not encode current message") - return err - } - message = string(messageByte) - } - - status := corev1.ConditionTrue - if reason != hivev1.InventoryReasonValid { - status = corev1.ConditionFalse - } - - conditions, changed := controllerutils.SetClusterPoolConditionWithChangeCheck( - pool.Status.Conditions, - hivev1.ClusterPoolInventoryValidCondition, - status, - reason, - message, - controllerutils.UpdateConditionIfReasonOrMessageChange, - ) - - if changed { - pool.Status.Conditions = conditions - if update { - if err := r.Status().Update(context.TODO(), pool); err != nil { - logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterPool conditions") - return errors.Wrap(err, "could not update ClusterPool conditions") - } - } - } - - return nil -} - func (r *ReconcileClusterPool) verifyClusterImageSet(pool *hivev1.ClusterPool, logger log.FieldLogger) error { err := r.Get(context.Background(), client.ObjectKey{Name: pool.Spec.ImageSetRef.Name}, &hivev1.ClusterImageSet{}) if err != nil { @@ -1327,40 +1130,36 @@ func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPoo if pool.Spec.Inventory == nil { return nil } - var inventory []hivev1.ClusterDeploymentCustomization - for _, entry := range pool.Spec.Inventory { - if entry.Kind == hivev1.ClusterDeploymentCustomizationInventoryEntry { - cdc := &hivev1.ClusterDeploymentCustomization{} - if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: pool.Namespace, Name: entry.Name}, cdc); err != nil { - if apierrors.IsNotFound(err) { - r.updateInventoryValidMessage(pool, cdc.Name, "missing", true, logger) - } - return err - } - currentAvailability := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if currentAvailability == nil || currentAvailability.Status == corev1.ConditionTrue { - inventory = append(inventory, *cdc) - } - } + cdcs, err := getAllCustomizationsForPool(r.Client, pool, logger) + if err != nil { + return err } - if len(inventory) > 0 { - sort.Slice( - inventory, - func(i, j int) bool { - if inventory[i].Status.LastApplyStatus == inventory[j].Status.LastApplyStatus { - return inventory[i].Status.LastApplyTime.Before(&inventory[j].Status.LastApplyTime) - } - if inventory[i].Status.LastApplyStatus == hivev1.LastApplySucceeded { - return 
inventory[i].Name < inventory[j].Name
-				}
-				return inventory[i].Name < inventory[j].Name
-			},
-		)
-		cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: inventory[0].Name}
-		return nil
+
+	if len(cdcs.unassigned) == 0 {
+		return errors.New("no customization available")
 	}
-	return errors.New("no customization available")
+	sort.Slice(
+		cdcs.unassigned,
+		func(i, j int) bool {
+			iName := cdcs.unassigned[i].Name
+			jName := cdcs.unassigned[j].Name
+			iStatus := cdcs.unassigned[i].Status.LastApplyStatus
+			jStatus := cdcs.unassigned[j].Status.LastApplyStatus
+			iTime := cdcs.unassigned[i].Status.LastApplyTime
+			jTime := cdcs.unassigned[j].Status.LastApplyTime
+			if iStatus == jStatus {
+				return iTime.Before(&jTime)
+			}
+			if iStatus == hivev1.LastApplySucceeded {
+				return iName < jName
+			}
+			return iName < jName
+		},
+	)
+
+	cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.unassigned[0].Name}
+	return nil
 }

 func isInstallConfigSecret(obj interface{}) *corev1.Secret {
diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go
index 52c578db082..28b792f3103 100644
--- a/pkg/controller/clusterpool/clusterpool_controller_test.go
+++ b/pkg/controller/clusterpool/clusterpool_controller_test.go
@@ -1506,7 +1506,7 @@ func TestReconcileClusterPool(t *testing.T) {
 		if !test.noCustomization {
 			test.existing = append(
 				test.existing,
-				testcdc.FullBuilder(testNamespace, cdcName, scheme).Build(),
+				testcdc.FullBuilder(testNamespace, cdcName, scheme).Build(testcdc.Available()),
 			)
 		}
 		fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(test.existing...).Build()
diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go
index b57adfe1916..0a0ba065ad9 100644
--- a/pkg/controller/clusterpool/collections.go
+++ b/pkg/controller/clusterpool/collections.go
@@ -2,6 +2,7 @@ package clusterpool
 
 import (
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"sort"
@@ -15,6 +16,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
 	"github.com/openshift/hive/pkg/constants"
 	controllerutils "github.com/openshift/hive/pkg/controller/utils"
@@ -597,6 +599,247 @@ func (cds *cdCollection) Delete(c client.Client, cdName string) error {
 	return nil
 }
 
+type cdcCollection struct {
+	// Unclaimed by any cluster pool CD and not broken
+	unassigned []*hivev1.ClusterDeploymentCustomization
+	// Listed in the cluster pool inventory but not found
+	missing []string
+	// Used by some cluster deployment
+	reserved []*hivev1.ClusterDeploymentCustomization
+	// Last Cluster Deployment failed on provision
+	cloud []*hivev1.ClusterDeploymentCustomization
+	// Failed to apply patches for this cluster pool
+	syntax []*hivev1.ClusterDeploymentCustomization
+	// All CDCs in this pool
+	byCDCName map[string]*hivev1.ClusterDeploymentCustomization
+}
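[Reviewer note] The buckets above amount to a partition plus two overlays: every inventory entry is either missing or present in byCDCName; each present CDC is exactly one of unassigned or reserved; and cloud/syntax overlay those buckets with the last apply failure mode. Under those assumptions, the InventoryValid condition set by SyncClusterDeploymentCustomizationAssignments below reduces to a check like the following hypothetical helper (illustration only, not part of this patch):

// inventoryHealthy mirrors the condition logic further down: the pool's
// inventory is Valid exactly when no entry is missing, none broke cloud
// provisioning, and none failed install-config patching.
func inventoryHealthy(cdcs *cdcCollection) bool {
	return len(cdcs.missing)+len(cdcs.cloud)+len(cdcs.syntax) == 0
}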
+// getAllCustomizationsForPool is the constructor for a cdcCollection for all of the
+// ClusterDeploymentCustomizations that are related to the specified pool.
+func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logger log.FieldLogger) (*cdcCollection, error) {
+	if pool.Spec.Inventory == nil {
+		return nil, nil
+	}
+	cdcList := &hivev1.ClusterDeploymentCustomizationList{}
+	if err := c.List(
+		context.Background(), cdcList,
+		client.MatchingFields{claimClusterPoolIndex: pool.Name},
+		client.InNamespace(pool.Namespace)); err != nil {
+		logger.WithError(err).Error("error listing ClusterClaims")
+		return nil, err
+	}
+
+	cdcCol := cdcCollection{
+		unassigned: make([]*hivev1.ClusterDeploymentCustomization, 0),
+		missing:    make([]string, 0),
+		reserved:   make([]*hivev1.ClusterDeploymentCustomization, 0),
+		cloud:      make([]*hivev1.ClusterDeploymentCustomization, 0),
+		syntax:     make([]*hivev1.ClusterDeploymentCustomization, 0),
+		byCDCName:  make(map[string]*hivev1.ClusterDeploymentCustomization),
+	}
+
+	for _, item := range pool.Spec.Inventory {
+		missing := true
+		for _, cdc := range cdcList.Items {
+			if cdc.Name != item.Name {
+				continue
+			}
+			missing = false
+			cdcCol.byCDCName[item.Name] = &cdc
+			if cdRef := cdc.Status.ClusterDeploymentRef; cdRef == nil {
+				cdcCol.unassigned = append(cdcCol.unassigned, &cdc)
+			} else {
+				cdcCol.reserved = append(cdcCol.unassigned, &cdc)
+			}
+			if cdc.Status.LastApplyStatus == hivev1.LastApplyBrokenCloud {
+				cdcCol.cloud = append(cdcCol.cloud, &cdc)
+			}
+			if cdc.Status.LastApplyStatus == hivev1.LastApplyBrokenSyntax {
+				cdcCol.syntax = append(cdcCol.cloud, &cdc)
+			}
+		}
+		if missing {
+			cdcCol.missing = append(cdcCol.missing, item.Name)
+		}
+	}
+
+	logger.WithFields(log.Fields{
+		"reservedCount":       len(cdcCol.reserved),
+		"unassignedCount":     len(cdcCol.unassigned),
+		"brokenByCloudCount":  len(cdcCol.cloud),
+		"brokenBySyntaxCount": len(cdcCol.syntax),
+		"missingCount":        len(cdcCol.missing),
+	}).Debug("found ClusterDeploymentCustomizations for ClusterPool")
+
+	return &cdcCol, nil
+}
+
+// SyncClusterDeploymentCustomizationAssignments makes sure that CDCs related to the pool, the pool inventory and related ClusterDeployments
+// are in the correct state.
+func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c client.Client, pool *hivev1.ClusterPool, cds *cdCollection, logger log.FieldLogger) error {
+	if pool.Spec.Inventory == nil {
+		return nil
+	}
+
+	contains := func(s []string, str string) bool {
+		for _, v := range s {
+			if v == str {
+				return true
+			}
+		}
+
+		return false
+	}
+
+	cdNames := []string{}
+	for cdName := range cds.byCDName {
+		cdNames = append(cdNames, cdName)
+	}
+
+	for _, cdc := range cdcs.reserved {
+		if !contains(cdNames, cdc.Status.ClusterDeploymentRef.Name) {
+			// If there is no CD, but CDC is reserved, then we release the CDC
+			if err := setCustomizationAvailabilityCondition(c, cdc, nil, logger); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Make sure CD <=> CDC links are legit; repair them if not.
+	for _, cdc := range cdcs.unassigned {
+		for _, cd := range cds.byCDName {
+			if cd.Spec.ClusterPoolRef.CustomizationRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef.Name == cdc.Name {
+				if err := setCustomizationAvailabilityCondition(c, cdc, cd, logger); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// Notice a Broken CD => update the CDC's LastApplyStatus to BrokenByCloud;
+	for _, cd := range cds.broken {
+		cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef
+		if cdcRef == nil {
+			continue
+		}
+
+		update := true
+		for _, cdc := range cdcs.cloud {
+			if cdc.Name == cdcRef.Name {
+				update = false
+				break
+			}
+		}
+
+		if update {
+			cdc := cdcs.byCDCName[cd.Spec.ClusterPoolRef.CustomizationRef.Name]
+			cdcs.cloud = append(cdcs.cloud, cdc)
+			cdc.Status.LastApplyStatus = hivev1.LastApplyBrokenCloud
+			cdc.Status.LastApplyTime = metav1.Now()
+			if err := c.Status().Update(context.TODO(), cdc); err != nil {
+				return err
+			}
+		}
+	}
+	// Notice a CD has finished installing => update the CDC's LastApplyStatus to Success;
+	for _, cd := range cds.Unassigned(false) {
+		cdc := cdcs.byCDCName[cd.Spec.ClusterPoolRef.CustomizationRef.Name]
+		if cdc.Status.LastApplyStatus != hivev1.LastApplySucceeded {
+			cdc.Status.LastApplyStatus = hivev1.LastApplySucceeded
+			cdc.Status.LastApplyTime = metav1.Now()
+			if err := c.Status().Update(context.TODO(), cdc); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Update Cluster Pool Inventory Condition
+	message := ""
+	status := corev1.ConditionTrue
+	reason := hivev1.InventoryReasonValid
+	if (len(cdcs.syntax) + len(cdcs.cloud) + len(cdcs.missing)) > 0 {
+		messageByte, err := json.Marshal(cdcs)
+		if err != nil {
+			return err
+		}
+		message = string(messageByte)
+		status = corev1.ConditionFalse
+		reason = hivev1.InventoryReasonInvalid
+	}
+
+	conditions, changed := controllerutils.SetClusterPoolConditionWithChangeCheck(
+		pool.Status.Conditions,
+		hivev1.ClusterPoolInventoryValidCondition,
+		status,
+		reason,
+		message,
+		controllerutils.UpdateConditionIfReasonOrMessageChange,
+	)
+
+	if changed {
+		pool.Status.Conditions = conditions
+		if err := c.Status().Update(context.TODO(), pool); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (cdcs *cdcCollection) MarshalJSON() ([]byte, error) {
+	cloud := []string{}
+	for _, cdc := range cdcs.cloud {
+		cloud = append(cloud, cdc.Name)
+	}
+	syntax := []string{}
+	for _, cdc := range cdcs.syntax {
+		cloud = append(syntax, cdc.Name)
+	}
+
+	return json.Marshal(&struct {
+		BrokenByCloud  []string
+		BrokenBySyntax []string
+		Missing        []string
+	}{
+		BrokenByCloud:  cloud,
+		BrokenBySyntax: syntax,
+		Missing:        cdcs.missing,
+	})
+}
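[Reviewer note] The MarshalJSON above is what feeds the InventoryValid condition message. A small illustration with hypothetical names — the keys come from the anonymous struct's exported field names (there are no json tags), and the slices are initialized empty, so an absent failure mode marshals as [] rather than null:

// brokenCDC is assumed to be a *hivev1.ClusterDeploymentCustomization
// whose Name is "test-cdc-broken".
cdcs := &cdcCollection{
	cloud:   []*hivev1.ClusterDeploymentCustomization{brokenCDC},
	syntax:  []*hivev1.ClusterDeploymentCustomization{},
	missing: []string{"test-cdc-missing"},
}
msg, _ := json.Marshal(cdcs) // dispatches to the MarshalJSON defined above
// string(msg) == `{"BrokenByCloud":["test-cdc-broken"],"BrokenBySyntax":[],"Missing":["test-cdc-missing"]}`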
+func setCustomizationAvailabilityCondition(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
+	status := corev1.ConditionTrue
+	reason := "Available"
+	message := "available"
+	cdc.Status.ClusterDeploymentRef = nil
+	cdc.Status.ClusterPoolRef = nil
+
+	if cd != nil {
+		status = corev1.ConditionFalse
+		reason = "Reserved"
+		message = "reserved"
+		cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cd.Name}
+		cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: cd.Spec.ClusterPoolRef.PoolName}
+	}
+
+	existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable)
+	if existingCondition.Reason != reason || existingCondition.Message != message {
+		conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
+			Type:    conditionsv1.ConditionAvailable,
+			Status:  status,
+			Reason:  reason,
+			Message: message,
+		})
+
+		if err := c.Status().Update(context.TODO(), cdc); err != nil {
+			logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions")
+			return err
+		}
+	}
+
+	return nil
+}
+
 // setCDsCurrentCondition idempotently sets the ClusterDeploymentsCurrent condition on the
 // ClusterPool according to whether all unassigned CDs have the same PoolVersion as the pool.
 func setCDsCurrentCondition(c client.Client, cds *cdCollection, clp *hivev1.ClusterPool, poolVersion string) error {
diff --git a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go
index db2f28dd60b..2aee5c19089 100644
--- a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go
+++ b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go
@@ -3,7 +3,9 @@ package clusterdeploymentcustomization
 import (
 	"k8s.io/apimachinery/pkg/runtime"
 
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
+	corev1 "k8s.io/api/core/v1"
 
 	"github.com/openshift/hive/pkg/test/generic"
 )
@@ -67,7 +69,18 @@ func (b *builder) GenericOptions(opts ...generic.Option) Builder {
 
 // Generic allows common functions applicable to all objects to be used as Options to Build
 func Generic(opt generic.Option) Option {
-	return func(clusterDeployment *hivev1.ClusterDeploymentCustomization) {
-		opt(clusterDeployment)
+	return func(cdc *hivev1.ClusterDeploymentCustomization) {
+		opt(cdc)
+	}
+}
+
+func Available() Option {
+	return func(cdc *hivev1.ClusterDeploymentCustomization) {
+		cdc.Status.Conditions = append(cdc.Status.Conditions, conditionsv1.Condition{
+			Type:    conditionsv1.ConditionAvailable,
+			Status:  corev1.ConditionTrue,
+			Reason:  "Available",
+			Message: "available",
+		})
+	}
+}
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
index baf95efc1a8..059e10ba6c4 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -67,6 +67,10 @@ type ClusterDeploymentCustomizationStatus struct {
 	// +optional
 	ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"`
 
+	// ClusterPoolRef is a reference to the cluster pool that this customization is used in.
+	// +optional
+	ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"`
+
 	// LastApplyTime indicates the time when the customization was applied on a cluster deployment.
// +optional LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"` diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go index f7ea0ffc9b4..37efd25a558 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go @@ -222,20 +222,12 @@ const ( ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" ) -// Inventory (in)valid reasons const ( // InventoryReasonValid is used when all ClusterDeploymentCustomization are - // available and when used the ClusterDeployments are successfully installed + // available and when used the ClusterDeployments are successfully installed. InventoryReasonValid = "Valid" - // InventoryReasonMissing is used when one or more ClusterDeploymentCustomization are missing - InventoryReasonMissing = "Missing" - // InventoryReasonFound is used cancel a missing ClusterDeploymentCustomization - InventoryReasonFound = "Found" - // InventoryReasonBrokenByCloud is used when one or more ClusterDeployments installations failed - InventoryReasonBrokenByCloud = "BrokenByCloud" - // InvenotryReasonBrokenBySyntax is used when one or more ClusterDeploymentCustomization patching failed - InvenotryReasonBrokenBySyntax = "BrokenBySyntax" - // InventoryReasonInvalid is used when multiple reasons and ClusterDeploymentCustomizations are incompatible + // InventoryReasonInvalid is used when there is something wrong with ClusterDeploymentCustomization, for example + // patching issue, provisioning failure, missing, etc. InventoryReasonInvalid = "Invalid" ) From 769cca3ecdc1e3e964ef9a64adc266b3042dd7f3 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Tue, 3 May 2022 18:10:01 +0300 Subject: [PATCH 06/27] Unit tests --- .../clusterdeploymentcustomization_types.go | 3 + ...ft.io_clusterdeploymentcustomizations.yaml | 4 + hack/app-sre/saas-template.yaml | 4 + .../clusterdeployment_controller.go | 2 +- .../clusterpool/clusterpool_controller.go | 14 +- .../clusterpool_controller_test.go | 257 ++++++++++++++---- pkg/controller/clusterpool/collections.go | 37 +-- .../clusterdeployment/clusterdeployment.go | 2 +- .../clusterdeploymentcustomization.go | 48 ++++ pkg/test/clusterpool/clusterpool.go | 14 +- .../clusterdeploymentcustomization_types.go | 3 + 11 files changed, 317 insertions(+), 71 deletions(-) diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go index 059e10ba6c4..a496bb9e2a8 100644 --- a/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -56,6 +56,9 @@ type PatchEntity struct { // Path is the json path to the value to be modified // +required Path string `json:"path"` + // From is the json path to copy or move the value from + // +optional + From string `json:"from,omitempty"` // Value is the value to be used in the operation // +required Value string `json:"value"` diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml index 89b11855ea3..76fd10c1d79 100644 --- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -43,6 +43,10 @@ spec: description: PatchEntity represent a json patch (RFC 6902) to be applied to the install-config properties: + from: + 
description: From is the json path to copy or move the value + from + type: string op: description: 'Op is the operation to perform: add, remove, replace, move, copy, test' diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index 10b59cc1560..392cbb05251 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -297,6 +297,10 @@ objects: description: PatchEntity represent a json patch (RFC 6902) to be applied to the install-config properties: + from: + description: From is the json path to copy or move the value + from + type: string op: description: 'Op is the operation to perform: add, remove, replace, move, copy, test' diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index dde61468c64..8a146242624 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -1467,7 +1467,7 @@ func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDepl changed := false existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if existingCondition.Reason != "Available" || existingCondition.Message != "available" { + if existingCondition == nil || existingCondition.Reason != "Available" || existingCondition.Message != "available" { changed = true conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionAvailable, diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 2053bcc6299..37d8daff6cf 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -779,6 +779,7 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h newPatch = append(newPatch, yamlpatch.Operation{ Op: yamlpatch.Op(patch.Op), Path: yamlpatch.OpPath(patch.Path), + From: yamlpatch.OpPath(patch.From), Value: yamlpatch.NewNode(&value), }) } @@ -798,7 +799,7 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h // Reserving ClusterDeploymentCustomization existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if existingCondition.Reason != "Reserved" || existingCondition.Message != "reserved" { + if existingCondition == nil || existingCondition.Status == corev1.ConditionFalse { cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: clp.Name} cdc.Status.LastApplyTime = metav1.Now() conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ @@ -1148,11 +1149,20 @@ func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPoo jStatus := cdcs.unassigned[j].Status.LastApplyStatus iTime := cdcs.unassigned[i].Status.LastApplyTime jTime := cdcs.unassigned[j].Status.LastApplyTime + if iStatus == "" { + iStatus = hivev1.LastApplySucceeded + } + if jStatus == "" { + iStatus = hivev1.LastApplySucceeded + } if iStatus == jStatus { return iTime.Before(&jTime) } if iStatus == hivev1.LastApplySucceeded { - return iName < jName + return false + } + if jStatus == hivev1.LastApplySucceeded { + return true } return iName < jName }, diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index 
28b792f3103..ad71d16a86d 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -2,6 +2,7 @@ package clusterpool import ( "context" + "encoding/json" "sort" "testing" "time" @@ -22,6 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/apis/hive/v1/aws" "github.com/openshift/hive/pkg/constants" @@ -39,7 +41,6 @@ const ( testLeasePoolName = "aws-us-east-1" credsSecretName = "aws-creds" imageSetName = "test-image-set" - cdcName = "test-cdc" ) func TestReconcileClusterPool(t *testing.T) { @@ -80,15 +81,14 @@ func TestReconcileClusterPool(t *testing.T) { }), ) - inventoryPoolVersion := "06983eaafac7f695" - inventroyPoolBuilder := initializedPoolBuilder.Options( - testcp.WithInventory([]hivev1.InventoryEntry{ - { - Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry, - Name: cdcName, - }, + inventoryPoolBuilder := initializedPoolBuilder.Options( + testcp.WithInventory([]string{"test-cdc-1"}), + testcp.WithCondition(hivev1.ClusterPoolCondition{ + Status: corev1.ConditionUnknown, + Type: hivev1.ClusterPoolInventoryValidCondition, }), ) + cdBuilder := func(name string) testcd.Builder { return testcd.FullBuilder(name, name, scheme).Options( testcd.WithPowerState(hivev1.ClusterPowerStateHibernating), @@ -108,7 +108,6 @@ func TestReconcileClusterPool(t *testing.T) { existing []runtime.Object noClusterImageSet bool noCredsSecret bool - noCustomization bool inventory bool expectError bool expectedTotalClusters int @@ -120,17 +119,21 @@ func TestReconcileClusterPool(t *testing.T) { expectedCapacityStatus corev1.ConditionStatus expectedCDCurrentStatus corev1.ConditionStatus expectedInventoryVaildStatus corev1.ConditionStatus + expectedInventoryMessage map[string][]string expectedMissingDependenciesMessage string expectedAssignedClaims int expectedUnassignedClaims int expectedAssignedCDs int + expectedAssignedCDCs int expectedRunning int expectedLabels map[string]string // Tested on all clusters, so will not work if your test has pre-existing cds in the pool. // Map, keyed by claim name, of expected Status.Conditions['Pending'].Reason. // (The clusterpool controller always sets this condition's Status to True.) // Not checked if nil. 
 		expectedClaimPendingReasons map[string]string
+		expectedInventoryNext       string
 		expectPoolVersionChanged    bool
+		expectedPoolVersion         string
 	}{
 		{
 			name: "initialize conditions",
@@ -189,68 +192,194 @@
 		{
 			name: "poolVersion changes with new Inventory",
 			existing: []runtime.Object{
-				initializedPoolBuilder.Build(testcp.WithInventory(
-					[]hivev1.InventoryEntry{
-						{
-							Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry,
-							Name: cdcName,
-						},
-					},
-				)),
+				initializedPoolBuilder.Build(testcp.WithInventory([]string{"test-cdc-1"})),
 			},
+			inventory:                true,
 			expectPoolVersionChanged: true,
 		},
 		{
-			name: "poolVersion doens't change with existing Inventory",
+			name: "poolVersion doesn't change with existing inventory when entry added",
 			existing: []runtime.Object{
-				inventroyPoolBuilder.Build(testcp.WithInventory(
-					[]hivev1.InventoryEntry{
-						{
-							Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry,
-							Name: "test-cdc-2",
-						},
-					},
-				)),
+				inventoryPoolBuilder.Build(testcp.WithInventory([]string{"test-cdc-1", "test-cdc-2"})),
 			},
 			inventory:                true,
 			expectPoolVersionChanged: false,
+			expectedPoolVersion:      "06983eaafac7f695",
 		},
 		{
-			name: "poolVersion doens't change with existing Inventory 2",
+			name: "poolVersion changes when inventory removed",
 			existing: []runtime.Object{
-				inventroyPoolBuilder.Build(),
+				inventoryPoolBuilder.Build(testcp.WithInventory([]string{})),
 			},
-			inventory:                true,
-			expectPoolVersionChanged: false,
+			inventory:                false,
+			expectPoolVersionChanged: true,
+			expectedPoolVersion:      "06983eaafac7f695",
 		},
 		{
-			name: "customized clusterpool will create a cluster",
+			name: "cp with inventory and cdc exists is valid",
 			existing: []runtime.Object{
-				inventroyPoolBuilder.Build(testcp.WithSize(1)),
+				inventoryPoolBuilder.Build(testcp.WithSize(1)),
+				testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(),
 			},
 			inventory:                    true,
 			expectedTotalClusters:        1,
 			expectedObservedSize:         0,
 			expectedObservedReady:        0,
 			expectedInventoryVaildStatus: corev1.ConditionTrue,
+			expectedPoolVersion:          "06983eaafac7f695",
+			expectedAssignedCDCs:         1,
 		},
 		{
-			name: "customized clusterpool inventory valid",
+			name: "cp with inventory and cdc doesn't exist is not valid - missing",
 			existing: []runtime.Object{
-				inventroyPoolBuilder.Build(testcp.WithSize(1)),
+				inventoryPoolBuilder.Build(testcp.WithSize(1)),
 			},
+			inventory:                    true,
+			expectedTotalClusters:        0,
+			expectedObservedSize:         0,
+			expectedObservedReady:        0,
+			expectedInventoryVaildStatus: corev1.ConditionFalse,
+			expectedInventoryMessage:     map[string][]string{"Missing": {"test-cdc-1"}},
+			expectedCDCurrentStatus:      corev1.ConditionUnknown,
+			expectedPoolVersion:          "06983eaafac7f695",
+			expectError:                  true,
+		},
+		{
+			name: "cp with inventory and cdc patch broken is not valid - BrokenBySyntax",
+			existing: []runtime.Object{
+				inventoryPoolBuilder.Build(testcp.WithSize(1)),
+				testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(
+					testcdc.WithPatch("/broken/path", "replace", "x"),
+				),
+			},
+			inventory:                    true,
+			expectedTotalClusters:        0,
+			expectedObservedSize:         0,
+			expectedObservedReady:        0,
+			expectedInventoryVaildStatus: corev1.ConditionFalse,
+			expectedInventoryMessage:     map[string][]string{"BrokenBySyntax": {"test-cdc-1"}},
+			expectedPoolVersion:          "06983eaafac7f695",
+			expectedCDCurrentStatus:      corev1.ConditionUnknown,
+			expectError:                  true,
+		},
+		{
+			name: "cp with inventory and cd provisioning failed is not valid - BrokenByCloud",
+			existing: []runtime.Object{
+				inventoryPoolBuilder.Build(testcp.WithSize(1)),
testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), testcd.FullBuilder("c1", "c1", scheme).Build( - testcd.WithPoolVersion(inventoryPoolVersion), - testcd.WithPowerState(hivev1.ClusterPowerStateHibernating), + testcd.WithPoolVersion("06983eaafac7f695"), testcd.WithUnclaimedClusterPoolReference(testNamespace, testLeasePoolName), - testcd.WithClusterDeploymentCustomizationReference(cdcName), + testcd.WithCustomization("test-cdc-1"), + testcd.Broken(), + ), + }, + inventory: true, + expectedTotalClusters: 0, + expectedObservedSize: 1, + expectedObservedReady: 0, + expectedInventoryVaildStatus: corev1.ConditionFalse, + expectedInventoryMessage: map[string][]string{"BrokenByCloud": {"test-cdc-1"}}, + expectedPoolVersion: "06983eaafac7f695", + expectedAssignedCDCs: 1, + }, + { + name: "cp with inventory and good cdc is valid, cd created", + existing: []runtime.Object{ + inventoryPoolBuilder.Build(testcp.WithSize(1)), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), + unclaimedCDBuilder("c1").Build( + testcd.WithCustomization("test-cdc-1"), testcd.Running(), ), }, inventory: true, - expectedTotalClusters: 1, + expectedTotalClusters: 0, expectedObservedSize: 1, expectedObservedReady: 1, expectedInventoryVaildStatus: corev1.ConditionTrue, + expectedPoolVersion: "06983eaafac7f695", + expectedAssignedCDCs: 1, + }, + { + // Second prioritize by last used + name: "cp with inventory - correct prioritization - same status", + existing: []runtime.Object{ + initializedPoolBuilder.Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-successful-old", "test-cdc-unused-new"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-successful-old", scheme).Build( + testcdc.WithLastStatus(hivev1.LastApplySucceeded), + testcdc.WithLastTime(nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build( + testcdc.WithLastTime(nowish), + ), + }, + expectedTotalClusters: 1, + expectedInventoryVaildStatus: corev1.ConditionTrue, + expectedPoolVersion: "06983eaafac7f695", + expectedInventoryNext: "test-cdc-successful-old", + expectedAssignedCDCs: 1, + }, + { + // First prioritize unused/successful vs broken (cloud/syntax) + name: "cp with inventory - correct prioritization - successful vs broken", + existing: []runtime.Object{ + initializedPoolBuilder.Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-successful-new", "test-cdc-broken-old"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( + testcdc.WithLastStatus(hivev1.LastApplySucceeded), + testcdc.WithLastTime(nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-successful-new", scheme).Build( + testcdc.WithLastStatus(hivev1.LastApplyBrokenCloud), + testcdc.WithLastTime(nowish), + ), + }, + expectedTotalClusters: 1, + expectedInventoryVaildStatus: corev1.ConditionFalse, + expectedPoolVersion: "06983eaafac7f695", + expectedInventoryNext: "test-cdc-successful-new", + expectedAssignedCDCs: 1, + }, + { + name: "cp with inventory - release cdc when cd is missing", + existing: []runtime.Object{ + inventoryPoolBuilder.Build(testcp.WithSize(1)), + testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( + testcdc.WithLastStatus(hivev1.LastApplySucceeded), + testcdc.WithLastTime(nowish.Add(-time.Hour)), + testcdc.WithPool(testLeasePoolName), + testcdc.WithCD("c1"), + testcdc.Reserved(), + ), + }, + expectedTotalClusters: 1, + expectedPoolVersion: "06983eaafac7f695", + expectedAssignedCDCs: 0, + 
 		},
 		{
 			name: "cp with inventory - fix cdc when cd reference exists",
 			existing: []runtime.Object{
 				inventoryPoolBuilder.Build(testcp.WithSize(1)),
 				testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(
 					testcdc.Available(),
 				),
 				testcd.FullBuilder("c1", "c1", scheme).Build(
 					testcd.WithUnclaimedClusterPoolReference(testNamespace, testLeasePoolName),
 					testcd.WithCustomization("test-cdc-1"),
 				),
 			},
 			expectedTotalClusters:   1,
 			expectedObservedSize:    1,
 			expectedPoolVersion:     "06983eaafac7f695",
 			expectedAssignedCDCs:    0,
 			expectedCDCurrentStatus: corev1.ConditionUnknown,
 		},
 		{
 			// This also proves we only delete one stale cluster at a time
@@ -1503,11 +1635,8 @@
 					Build(testsecret.WithDataKeyValue("dummykey", []byte("dummyval"))),
 				)
 			}
-			if !test.noCustomization {
-				test.existing = append(
-					test.existing,
-					testcdc.FullBuilder(testNamespace, cdcName, scheme).Build(testcdc.Available()),
-				)
+			if test.expectedPoolVersion == "" {
+				test.expectedPoolVersion = initialPoolVersion
 			}
 			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(test.existing...).Build()
 			logger := log.New()
@@ -1544,10 +1673,7 @@
 			assert.Equal(t, test.expectedObservedSize, pool.Status.Size, "unexpected observed size")
 			assert.Equal(t, test.expectedObservedReady, pool.Status.Ready, "unexpected observed ready count")
 			currentPoolVersion := calculatePoolVersion(pool)
-			expectedPoolVersion := initialPoolVersion
-			if test.inventory {
-				expectedPoolVersion = inventoryPoolVersion
-			}
+			expectedPoolVersion := test.expectedPoolVersion
 			assert.Equal(
 				t, test.expectPoolVersionChanged, currentPoolVersion != expectedPoolVersion,
 				"expectPoolVersionChanged is %t\ninitial %q\nfinal %q",
@@ -1586,7 +1712,25 @@
 			inventoryValidCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition)
 			if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") {
 				assert.Equal(t, test.expectedInventoryVaildStatus, inventoryValidCondition.Status,
-					"unexpcted InventoryValid condition status")
+					"unexpected InventoryValid condition status %s", inventoryValidCondition.Message)
+			}
+		}
+
+		if test.expectedInventoryMessage != nil {
+			inventoryValidCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition)
+			if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") {
+				actualInventoryMessage := map[string][]string{}
+				err := json.Unmarshal([]byte(inventoryValidCondition.Message), &actualInventoryMessage)
+				require.NoError(t, err, "unable to parse inventory condition message")
+				for key, value := range test.expectedInventoryMessage {
+					if val, ok := actualInventoryMessage[key]; ok {
+						assert.ElementsMatch(t, value, val, "unexpected inventory message for %s: %s", key, inventoryValidCondition.Message)
+					} else {
+						assert.Fail(t, "expected inventory message to contain key %s: %s", key, inventoryValidCondition.Message)
+					}
 				}
 			}
 
@@ -1662,6 +1806,23 @@
 			}
assert.Equal(t, test.expectedAssignedClaims, actualAssignedClaims, "unexpected number of assigned claims") assert.Equal(t, test.expectedUnassignedClaims, actualUnassignedClaims, "unexpected number of unassigned claims") + + actualAssignedCDCs := 0 + cdcs := &hivev1.ClusterDeploymentCustomizationList{} + err = fakeClient.List(context.Background(), cdcs) + require.NoError(t, err) + for _, cdc := range cdcs.Items { + if test.expectedInventoryNext != "" { + if cdc.Status.ClusterPoolRef != nil && cdc.Status.ClusterPoolRef.Name == testLeasePoolName { + assert.Equal(t, test.expectedInventoryNext, cdc.Name, "unexpected inventory next") + } + } + condition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if condition != nil && condition.Status == corev1.ConditionFalse { + actualAssignedCDCs++ + } + } + assert.Equal(t, test.expectedAssignedCDCs, actualAssignedCDCs, "unexpected number of assigned CDCs") }) } } diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 0a0ba065ad9..98ec9289588 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -625,7 +625,7 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg context.Background(), cdcList, client.MatchingFields{claimClusterPoolIndex: pool.Name}, client.InNamespace(pool.Namespace)); err != nil { - logger.WithError(err).Error("error listing ClusterClaims") + logger.WithError(err).Error("error listing ClusterDeploymentCustomizations") return nil, err } @@ -640,22 +640,23 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg for _, item := range pool.Spec.Inventory { missing := true - for _, cdc := range cdcList.Items { + for i, cdc := range cdcList.Items { + ref := &cdcList.Items[i] if cdc.Name != item.Name { continue } missing = false - cdcCol.byCDCName[item.Name] = &cdc + cdcCol.byCDCName[item.Name] = ref if cdRef := cdc.Status.ClusterDeploymentRef; cdRef == nil { - cdcCol.unassigned = append(cdcCol.unassigned, &cdc) + cdcCol.unassigned = append(cdcCol.unassigned, ref) } else { - cdcCol.reserved = append(cdcCol.unassigned, &cdc) + cdcCol.reserved = append(cdcCol.reserved, ref) } if cdc.Status.LastApplyStatus == hivev1.LastApplyBrokenCloud { - cdcCol.cloud = append(cdcCol.cloud, &cdc) + cdcCol.cloud = append(cdcCol.cloud, ref) } if cdc.Status.LastApplyStatus == hivev1.LastApplyBrokenSyntax { - cdcCol.syntax = append(cdcCol.cloud, &cdc) + cdcCol.syntax = append(cdcCol.cloud, ref) } } if missing { @@ -696,9 +697,9 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien cdNames = append(cdNames, cdName) } + // If there is no CD, but CDC is reserved, then we release the CDC for _, cdc := range cdcs.reserved { if !contains(cdNames, cdc.Status.ClusterDeploymentRef.Name) { - // If there is no CD, but CDC is reserved, then we release the CDC if err := setCustomizationAvailabilityCondition(c, cdc, nil, logger); err != nil { return err } @@ -743,12 +744,16 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien } // Notice a CD has finished installing => update the CDC's LastApplyStatus to Success; for _, cd := range cds.Unassigned(false) { - cdc := cdcs.byCDCName[cd.Spec.ClusterPoolRef.CustomizationRef.Name] - if cdc.Status.LastApplyStatus != hivev1.LastApplySucceeded { - cdc.Status.LastApplyStatus = hivev1.LastApplySucceeded - cdc.Status.LastApplyTime = metav1.Now() - if err := 
c.Status().Update(context.TODO(), cdc); err != nil { - return err + if cd.Spec.ClusterPoolRef.CustomizationRef == nil { + continue + } + if cdc, ok := cdcs.byCDCName[cd.Spec.ClusterPoolRef.CustomizationRef.Name]; ok { + if cdc.Status.LastApplyStatus != hivev1.LastApplySucceeded { + cdc.Status.LastApplyStatus = hivev1.LastApplySucceeded + cdc.Status.LastApplyTime = metav1.Now() + if err := c.Status().Update(context.TODO(), cdc); err != nil { + return err + } } } } @@ -793,7 +798,7 @@ func (cdcs *cdcCollection) MarshalJSON() ([]byte, error) { } syntax := []string{} for _, cdc := range cdcs.syntax { - cloud = append(syntax, cdc.Name) + syntax = append(syntax, cdc.Name) } return json.Marshal(&struct { @@ -823,7 +828,7 @@ func setCustomizationAvailabilityCondition(c client.Client, cdc *hivev1.ClusterD } existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if existingCondition.Reason != reason || existingCondition.Message != message { + if existingCondition == nil || existingCondition.Reason != reason || existingCondition.Message != message { conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionAvailable, Status: status, diff --git a/pkg/test/clusterdeployment/clusterdeployment.go b/pkg/test/clusterdeployment/clusterdeployment.go index 13764fbf7d7..3a2cc2215f1 100644 --- a/pkg/test/clusterdeployment/clusterdeployment.go +++ b/pkg/test/clusterdeployment/clusterdeployment.go @@ -245,7 +245,7 @@ func WithClusterMetadata(clusterMetadata *hivev1.ClusterMetadata) Option { } } -func WithClusterDeploymentCustomizationReference(cdcName string) Option { +func WithCustomization(cdcName string) Option { return func(clusterDeployment *hivev1.ClusterDeployment) { clusterDeployment.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcName} } diff --git a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go index 2aee5c19089..3ab7ab0e765 100644 --- a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go +++ b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go @@ -1,11 +1,14 @@ package clusterdeploymentcustomization import ( + "time" + "k8s.io/apimachinery/pkg/runtime" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/openshift/hive/pkg/test/generic" ) @@ -84,3 +87,48 @@ func Available() Option { }) } } + +func Reserved() Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.Conditions = append(cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Reserved", + Message: "reserved", + }) + } +} + +func WithPatch(path, op, value string) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Spec.InstallConfigPatches = append(cdc.Spec.InstallConfigPatches, hivev1.PatchEntity{ + Path: path, + Op: op, + Value: value, + }) + } +} + +func WithLastStatus(status hivev1.LastApplyStatusType) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.LastApplyStatus = status + } +} + +func WithLastTime(lastTime time.Time) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.LastApplyTime = 
metav1.NewTime(lastTime) + } +} + +func WithPool(name string) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: name} + } +} + +func WithCD(name string) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: name} + } +} diff --git a/pkg/test/clusterpool/clusterpool.go b/pkg/test/clusterpool/clusterpool.go index cf3d3559276..c2f481f80ff 100644 --- a/pkg/test/clusterpool/clusterpool.go +++ b/pkg/test/clusterpool/clusterpool.go @@ -185,8 +185,16 @@ func WithRunningCount(size int) Option { } } -func WithInventory(inventory []hivev1.InventoryEntry) Option { - return func(clusterPool *hivev1.ClusterPool) { - clusterPool.Spec.Inventory = inventory +func WithInventory(cdcs []string) Option { + return func(clusterPool *hivev1.ClusterPool) { + if len(cdcs) == 0 { + clusterPool.Spec.Inventory = nil + } else { + inventory := []hivev1.InventoryEntry{} + for _, cdc := range cdcs { + inventory = append(inventory, hivev1.InventoryEntry{Name: cdc}) + } + clusterPool.Spec.Inventory = inventory + } } } diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go index 059e10ba6c4..a496bb9e2a8 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -56,6 +56,9 @@ type PatchEntity struct { // Path is the json path to the value to be modified // +required Path string `json:"path"` + // From is the json path to copy or move the value from + // +optional + From string `json:"from,omitempty"` // Value is the value to be used in the operation // +required Value string `json:"value"` From 9db55fab9636d445829d7d0f00d361c92813dc75 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Wed, 11 May 2022 14:52:08 +0300 Subject: [PATCH 07/27] ApplySucceeded condition --- .../clusterdeploymentcustomization_types.go | 31 +- apis/hive/v1/zz_generated.deepcopy.go | 6 +- ...ft.io_clusterdeploymentcustomizations.yaml | 11 +- go.mod | 2 +- go.sum | 3 +- hack/app-sre/saas-template.yaml | 11 +- .../clusterdeployment_controller.go | 42 +- .../clusterpool/clusterpool_controller.go | 120 ++---- .../clusterpool_controller_test.go | 41 +- pkg/controller/clusterpool/collections.go | 374 +++++++++++++----- .../hibernation_controller_test.go | 8 - pkg/controller/utils/clusterdeployment.go | 6 - .../clusterdeploymentcustomization.go | 32 +- .../conditions/v1/conditions.go | 46 ++- .../clusterdeploymentcustomization_types.go | 31 +- .../apis/hive/v1/zz_generated.deepcopy.go | 6 +- vendor/modules.txt | 2 +- 17 files changed, 431 insertions(+), 341 deletions(-) diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go index a496bb9e2a8..6d280e9a7da 100644 --- a/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -6,25 +6,20 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LastApplyStatusType indicates the status of the customization on the last -// applied cluster deployment. This is used for inventory sorting process to -// reduce the likelihood of using a broken customization repeatedly. 
-type LastApplyStatusType string - const ( - // LastApplySucceeded indicates that the customization + // CustomizationApplyReasonSucceeded indicates that the customization // worked properly on the last applied cluster deployment. - LastApplySucceeded LastApplyStatusType = "Succeeded" - // LastApplyBrokenSyntax indicates that Hive failed to apply + CustomizationApplyReasonSucceeded = "Succeeded" + // CustomizationApplyReasonBrokenSyntax indicates that Hive failed to apply // customization patches on install-config. More details would be found in // Valid condition message. - LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax" - // LastApplyBrokenCloud indicates that cluster deployment provision has failed + CustomizationApplyReasonBrokenSyntax = "BrokenBySyntax" + // CustomizationApplyReasonBrokenCloud indicates that cluster deployment provision has failed // when using this customization. More details would be found in the Valid condition message. - LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud" - // LastApplyInstallationPending indicates that the customization patches have + CustomizationApplyReasonBrokenCloud = "BrokenByCloud" + // CustomizationApplyReasonInstallationPending indicates that the customization patches have // been successfully applied but provisioning is not completed yet. - LastApplyInstallationPending LastApplyStatusType = "InstallationPending" + CustomizationApplyReasonInstallationPending = "InstallationPending" ) // +genclient @@ -74,13 +69,9 @@ type ClusterDeploymentCustomizationStatus struct { // +optional ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"` - // LastApplyTime indicates the time when the customization was applied on a cluster deployment. - // +optional - LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"` - - // LastApplyStatus indicates the customization status in the last applied cluster deployment. + // LastAppliedConfiguration contains the last applied patches to the install-config. // +optional - LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"` + LastAppliedConfiguration string `json:"lastAppliedConfiguration,omitempty"` // Conditions describes the state of the operator's reconciliation functionality. 
// +patchMergeKey=type @@ -90,7 +81,7 @@ type ClusterDeploymentCustomizationStatus struct { } const ( - ClusterDeploymentCustomizationValid conditionsv1.ConditionType = "Valid" + ApplySucceededCondition conditionsv1.ConditionType = "ApplySucceeded" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go index a41584ab13e..ac12a798fc7 100644 --- a/apis/hive/v1/zz_generated.deepcopy.go +++ b/apis/hive/v1/zz_generated.deepcopy.go @@ -767,7 +767,11 @@ func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploym *out = new(corev1.LocalObjectReference) **out = **in } - in.LastApplyTime.DeepCopyInto(&out.LastApplyTime) + if in.ClusterPoolRef != nil { + in, out := &in.ClusterPoolRef, &out.ClusterPoolRef + *out = new(corev1.LocalObjectReference) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]conditionsv1.Condition, len(*in)) diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml index 76fd10c1d79..04660f92b74 100644 --- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -114,14 +114,9 @@ spec: - type type: object type: array - lastApplyStatus: - description: LastApplyStatus indicates the customization status in - the last applied cluster deployment. - type: string - lastApplyTime: - description: LastApplyTime indicates the time when the customization - was applied on a cluster deployment. - format: date-time + lastAppliedConfiguration: + description: LastAppliedConfiguration contains the last applied patches + to the install-config. 
type: string type: object required: diff --git a/go.mod b/go.mod index 96f91285a59..475c45c68be 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( github.com/openshift/cluster-api-provider-ibmcloud v0.0.1-0.20220201105455-8014e5e894b0 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519 github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551 - github.com/openshift/custom-resource-status v1.1.2 + github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 github.com/openshift/hive/apis v0.0.0 github.com/openshift/installer v0.9.0-master.0.20220711145509-cdb9627de7ef diff --git a/go.sum b/go.sum index 087cdef92d3..0e7055eda69 100644 --- a/go.sum +++ b/go.sum @@ -1046,8 +1046,9 @@ github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850d github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519/go.mod h1:C7unCUThP8eqT4xQfbvg3oIDn2S9TYtb0wbBoH/SR2U= github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551 h1:nGa6igwzG7smZOACUsovgf9XG8vT96Zdyc4H6r2rqS0= github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551/go.mod h1:72ieWchfTx9U7UbQO47vhSXBoCi2IJGZhXoCezan4EM= -github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= +github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= +github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 h1:y47BAJFepK8Xls1c+quIOyc46OXiT9LRiqGVjIaMlSA= github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480/go.mod h1:OAHL5WnZphlhVEf5fTdeGLvNwMu1B2zCWpmxJpCA35o= github.com/openshift/installer v0.9.0-master.0.20220711145509-cdb9627de7ef h1:y3d9tfJqoKLsUwOJHi3iPBYmJe4Ukj8n19SkUsZbVUA= diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index 392cbb05251..49c2bccd3ef 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -368,14 +368,9 @@ objects: - type type: object type: array - lastApplyStatus: - description: LastApplyStatus indicates the customization status - in the last applied cluster deployment. - type: string - lastApplyTime: - description: LastApplyTime indicates the time when the customization - was applied on a cluster deployment. - format: date-time + lastAppliedConfiguration: + description: LastAppliedConfiguration contains the last applied + patches to the install-config. 
type: string type: object required: diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 8a146242624..40fe36d98c6 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -1442,58 +1442,40 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1 } func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { - if cd.Spec.ClusterPoolRef == nil || cd.Spec.ClusterPoolRef.CustomizationRef == nil || !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { + if cpRef := cd.Spec.ClusterPoolRef; cpRef == nil || cpRef.CustomizationRef == nil { return nil } - customizationRef := cd.Spec.ClusterPoolRef.CustomizationRef + cdc := &hivev1.ClusterDeploymentCustomization{} cdcNamespace := cd.Spec.ClusterPoolRef.Namespace - cdcName := customizationRef.Name + cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name cdcLog := cdLog.WithField("customization", cdcName).WithField("namespace", cdcNamespace) err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: cdcNamespace, Name: cdcName}, cdc) if err != nil { if apierrors.IsNotFound(err) { cdcLog.Info("customization not found, nothing to release") - controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) - if err := r.Update(context.TODO(), cd); err != nil { - cdcLog.WithError(err).Error("failed to update ClusterDeployment") - return err - } return nil } - log.WithError(err).Error("error reading customization") + cdcLog.WithError(err).Error("error reading customization") return err } - changed := false - existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if existingCondition == nil || existingCondition.Reason != "Available" || existingCondition.Message != "available" { - changed = true - conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Available", - Message: "available", - }) - } + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Available", + Message: "available", + }) - if cdc.Status.ClusterDeploymentRef != nil { - changed = true - cdc.Status.ClusterDeploymentRef = nil - } if changed { + cdc.Status.ClusterPoolRef = nil + cdc.Status.ClusterDeploymentRef = nil if err := r.Status().Update(context.Background(), cdc); err != nil { cdcLog.WithError(err).Error("failed to update ClusterDeploymentCustomizationAvailable condition") return err } } - controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) - if err := r.Update(context.TODO(), cd); err != nil { - cdcLog.WithError(err).Error("Failed to update ClusterDeployment after ClusterDeploymentCustomization finalizer deletion") - return err - } - return nil } diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 37d8daff6cf..a09a938aa5b 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -2,6 +2,7 @@ package clusterpool import ( "context" + "encoding/json" "fmt" "math" "reflect" @@ -28,7 +29,6 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" apihelpers "github.com/openshift/hive/apis/helpers" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/clusterresource" @@ -48,7 +48,6 @@ const ( icSecretDependent = "install config template secret" cdClusterPoolIndex = "spec.clusterpool.namespacedname" claimClusterPoolIndex = "spec.clusterpoolname" - cdcClusterPoolIndex = "status.clusterpoolname" defaultInventoryAttempts = 5 ) @@ -174,6 +173,11 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc return err } + // Watch for changes to ClusterDeploymentCustomizations + if err := c.Watch(&source.Kind{Type: &hivev1.ClusterDeploymentCustomization{}}, &handler.EnqueueRequestForObject{}); err != nil { + return err + } + return nil } @@ -377,7 +381,7 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. // If too few, create new InstallConfig and ClusterDeployment. case drift < 0 && availableCapacity > 0: toAdd := minIntVarible(-drift, availableCapacity, availableCurrent) - if err := r.addClusters(clp, poolVersion, cds, toAdd, logger); err != nil { + if err := r.addClusters(clp, poolVersion, cds, toAdd, cdcs, logger); err != nil { log.WithError(err).Error("error adding clusters") return reconcile.Result{}, err } @@ -427,7 +431,6 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, nil } -// reconcileRunningClusters ensures the oldest unassigned clusters are set to running, and the // remainder are set to hibernating. The number of clusters we set to running is determined by // adding the cluster's configured runningCount to the number of unsatisfied claims for which we're // spinning up new clusters. 
@@ -457,10 +460,7 @@ func (r *ReconcileClusterPool) reconcileRunningClusters( for i := 0; i < len(cdList); i++ { cd := cdList[i] hibernateCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.ClusterHibernatingCondition) - hibernateUnsupported := false - if hibernateCondition != nil && hibernateCondition.Reason == hivev1.HibernatingReasonUnsupported { - hibernateUnsupported = true - } + hibernateUnsupported := hibernateCondition != nil && hibernateCondition.Reason == hivev1.HibernatingReasonUnsupported var desiredPowerState hivev1.ClusterPowerState if i < runningCount || hibernateUnsupported { desiredPowerState = hivev1.ClusterPowerStateRunning @@ -620,6 +620,7 @@ func (r *ReconcileClusterPool) addClusters( poolVersion string, cds *cdCollection, newClusterCount int, + cdcs *cdcCollection, logger log.FieldLogger, ) error { logger.WithField("count", newClusterCount).Info("Adding new clusters") @@ -658,7 +659,7 @@ func (r *ReconcileClusterPool) addClusters( } for i := 0; i < newClusterCount; i++ { - cd, err := r.createCluster(clp, cloudBuilder, pullSecret, installConfigTemplate, poolVersion, logger) + cd, err := r.createCluster(clp, cloudBuilder, pullSecret, installConfigTemplate, poolVersion, cdcs, logger) if err != nil { return err } @@ -674,8 +675,15 @@ func (r *ReconcileClusterPool) createCluster( pullSecret string, installConfigTemplate string, poolVersion string, + cdcs *cdcCollection, logger log.FieldLogger, ) (*hivev1.ClusterDeployment, error) { + if clp.Spec.Inventory != nil { + if len(cdcs.unassigned) == 0 { + return nil, errors.New("no customization available") + } + } + var err error ns, err := r.createRandomNamespace(clp) @@ -729,15 +737,15 @@ func (r *ReconcileClusterPool) createCluster( cdPos = i poolRef := poolReference(clp) cd.Spec.ClusterPoolRef = &poolRef - if err := r.getInventoryCustomization(clp, cd, logger); err != nil { - return nil, err + if clp.Spec.Inventory != nil { + cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.unassigned[0].Name} } } else if secretTmp := isInstallConfigSecret(obj); secretTmp != nil { secret = secretTmp } } - if err := r.patchInstallConfig(clp, cd, secret, logger); err != nil { + if err := r.patchInstallConfig(clp, cd, secret, cdcs, logger); err != nil { return nil, err } @@ -756,7 +764,7 @@ func (r *ReconcileClusterPool) createCluster( } // patchInstallConfig responsible for applying ClusterDeploymentCustomization and its reservation -func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *hivev1.ClusterDeployment, secret *corev1.Secret, logger log.FieldLogger) error { +func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *hivev1.ClusterDeployment, secret *corev1.Secret, cdcs *cdcCollection, logger log.FieldLogger) error { if clp.Spec.Inventory == nil { return nil } @@ -783,38 +791,23 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h Value: yamlpatch.NewNode(&value), }) } + installConfig, err := newPatch.Apply([]byte(secret.StringData["install-config.yaml"])) if err != nil { - cdcs := cdcCollection{syntax: []*hivev1.ClusterDeploymentCustomization{cdc}} - cdcs.SyncClusterDeploymentCustomizationAssignments(r.Client, clp, &cdCollection{}, logger) - cdc.Status.LastApplyStatus = hivev1.LastApplyBrokenSyntax - if updateErr := r.Status().Update(context.Background(), cdc); updateErr != nil { - if apierrors.IsNotFound(err) { - return errors.New("missing customization") - } - } - + cdcs.BrokenSyntax(r, 
cdc, fmt.Sprint(err)) + cdcs.UpdateInventoryValidCondition(r, clp) return err } - // Reserving ClusterDeploymentCustomization - existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if existingCondition == nil || existingCondition.Status == corev1.ConditionFalse { - cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: clp.Name} - cdc.Status.LastApplyTime = metav1.Now() - conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Reserved", - Message: "reserved", - }) + configJson, err := json.Marshal(cdc.Spec) + if err != nil { + return err + } - if err := r.Status().Update(context.Background(), cdc); err != nil { - if apierrors.IsNotFound(err) { - return errors.New("missing customization") - } - return err - } + cdc.Status.LastAppliedConfiguration = string(configJson) + cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: clp.Name} + if err := cdcs.Reserve(r, cdc); err != nil { + return err } secret.StringData["install-config.yaml"] = string(installConfig) @@ -1123,55 +1116,6 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg } } -// getInventoryCustomization retrieves available ClusterDeploymentCustomizations -// and picks oldest successful customizations to avoid using the same broken -// customization. When customizations have the same last apply status, the -// oldest used customization will be prioritized. -func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPool, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error { - if pool.Spec.Inventory == nil { - return nil - } - cdcs, err := getAllCustomizationsForPool(r.Client, pool, logger) - if err != nil { - return err - } - - if len(cdcs.unassigned) == 0 { - return errors.New("no customization available") - } - - sort.Slice( - cdcs.unassigned, - func(i, j int) bool { - iName := cdcs.unassigned[i].Name - jName := cdcs.unassigned[j].Name - iStatus := cdcs.unassigned[i].Status.LastApplyStatus - jStatus := cdcs.unassigned[j].Status.LastApplyStatus - iTime := cdcs.unassigned[i].Status.LastApplyTime - jTime := cdcs.unassigned[j].Status.LastApplyTime - if iStatus == "" { - iStatus = hivev1.LastApplySucceeded - } - if jStatus == "" { - iStatus = hivev1.LastApplySucceeded - } - if iStatus == jStatus { - return iTime.Before(&jTime) - } - if iStatus == hivev1.LastApplySucceeded { - return false - } - if jStatus == hivev1.LastApplySucceeded { - return true - } - return iName < jName - }, - ) - - cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.unassigned[0].Name} - return nil -} - func isInstallConfigSecret(obj interface{}) *corev1.Secret { if secret, ok := obj.(*corev1.Secret); ok { _, ok := secret.StringData["install-config.yaml"] diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index ad71d16a86d..2fda5efe395 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -310,12 +310,9 @@ func TestReconcileClusterPool(t *testing.T) { testcp.WithInventory([]string{"test-cdc-successful-old", "test-cdc-unused-new"}), ), testcdc.FullBuilder(testNamespace, "test-cdc-successful-old", scheme).Build( - testcdc.WithLastStatus(hivev1.LastApplySucceeded), - testcdc.WithLastTime(nowish.Add(-time.Hour)), - ), - 
testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build( - testcdc.WithLastTime(nowish), + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), ), + testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build(), }, expectedTotalClusters: 1, expectedInventoryVaildStatus: corev1.ConditionTrue, @@ -332,12 +329,10 @@ func TestReconcileClusterPool(t *testing.T) { testcp.WithInventory([]string{"test-cdc-successful-new", "test-cdc-broken-old"}), ), testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( - testcdc.WithLastStatus(hivev1.LastApplySucceeded), - testcdc.WithLastTime(nowish.Add(-time.Hour)), + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), ), testcdc.FullBuilder(testNamespace, "test-cdc-successful-new", scheme).Build( - testcdc.WithLastStatus(hivev1.LastApplyBrokenCloud), - testcdc.WithLastTime(nowish), + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonBrokenCloud, nowish), ), }, expectedTotalClusters: 1, @@ -349,10 +344,12 @@ func TestReconcileClusterPool(t *testing.T) { { name: "cp with inventory - release cdc when cd is missing", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithSize(1)), - testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( - testcdc.WithLastStatus(hivev1.LastApplySucceeded), - testcdc.WithLastTime(nowish.Add(-time.Hour)), + inventoryPoolBuilder.Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-1"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), testcdc.WithPool(testLeasePoolName), testcdc.WithCD("c1"), testcdc.Reserved(), @@ -360,13 +357,15 @@ func TestReconcileClusterPool(t *testing.T) { }, expectedTotalClusters: 1, expectedPoolVersion: "06983eaafac7f695", - expectedAssignedCDCs: 0, + expectedAssignedCDCs: 1, // The CDC will be assigned to a new cluster }, - // #################### { name: "cp with inventory - fix cdc when cd reference exists", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithSize(1)), + inventoryPoolBuilder.Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-1"}), + ), testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build( testcdc.Available(), ), @@ -378,7 +377,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedTotalClusters: 1, expectedObservedSize: 1, expectedPoolVersion: "06983eaafac7f695", - expectedAssignedCDCs: 0, + expectedAssignedCDCs: 1, expectedCDCurrentStatus: corev1.ConditionUnknown, }, { @@ -1616,9 +1615,6 @@ func TestReconcileClusterPool(t *testing.T) { } for _, test := range tests { - if test.name != "cp with inventory - fix cdc when cd reference exists" { - // continue - } t.Run(test.name, func(t *testing.T) { if !test.noClusterImageSet { test.existing = append( @@ -1661,7 +1657,8 @@ func TestReconcileClusterPool(t *testing.T) { } else { assert.NoError(t, err, "expected no error from reconcile") } - + cdcs := &hivev1.ClusterDeploymentCustomizationList{} + fakeClient.List(context.Background(), cdcs) pool := &hivev1.ClusterPool{} err = fakeClient.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeasePoolName}, pool) @@ -1808,7 +1805,7 @@ func TestReconcileClusterPool(t *testing.T) { assert.Equal(t, test.expectedUnassignedClaims, actualUnassignedClaims, "unexpected number of unassigned claims") actualAssignedCDCs := 0 - cdcs := 
&hivev1.ClusterDeploymentCustomizationList{} + // cdcs := &hivev1.ClusterDeploymentCustomizationList{} err = fakeClient.List(context.Background(), cdcs) require.NoError(t, err) for _, cdc := range cdcs.Items { diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 98ec9289588..6ae16a709f1 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -478,6 +478,17 @@ func (cds *cdCollection) Unassigned(includeBroken bool) []*hivev1.ClusterDeploym return ret } +// Installed returns the list of ClusterDeployments which are Installed +func (cds *cdCollection) Installed() []*hivev1.ClusterDeployment { + ret := []*hivev1.ClusterDeployment{} + ret = append(ret, cds.assignable...) + ret = append(ret, cds.standby...) + for _, cd := range cds.byClaimName { + ret = append(ret, cd) + } + return ret +} + // UnknownPoolVersion returns the list of ClusterDeployments whose pool version annotation is // missing or empty. func (cds *cdCollection) UnknownPoolVersion() []*hivev1.ClusterDeployment { @@ -600,9 +611,9 @@ func (cds *cdCollection) Delete(c client.Client, cdName string) error { } type cdcCollection struct { - // Unclaimed by any cluster pool CD any not broken + // Unclaimed by any cluster pool CD and are not broken unassigned []*hivev1.ClusterDeploymentCustomization - // Listed in the cluster pool inventory but not found + // Missing CDC means listed in pool inventory but the custom resource doesn't exist in the pool namespace missing []string // Used by some cluster deployment reserved []*hivev1.ClusterDeploymentCustomization @@ -610,23 +621,24 @@ type cdcCollection struct { cloud []*hivev1.ClusterDeploymentCustomization // Failed to apply patches for this cluster pool syntax []*hivev1.ClusterDeploymentCustomization - // All CDCs in this pool + // ByCDCName are all the CDCs listed in the pool inventory, the CR exists and are mapped by name byCDCName map[string]*hivev1.ClusterDeploymentCustomization + // Namespace are all the CDC in the namespace mapped by name + namespace map[string]*hivev1.ClusterDeploymentCustomization } // getAllCustomizationsForPool is the constructor for a cdcCollection for all of the // ClusterDeploymentCustomizations that are related to specified pool. 
func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logger log.FieldLogger) (*cdcCollection, error) { if pool.Spec.Inventory == nil { - return nil, nil + return &cdcCollection{}, nil } cdcList := &hivev1.ClusterDeploymentCustomizationList{} if err := c.List( context.Background(), cdcList, - client.MatchingFields{claimClusterPoolIndex: pool.Name}, client.InNamespace(pool.Namespace)); err != nil { logger.WithError(err).Error("error listing ClusterDeploymentCustomizations") - return nil, err + return &cdcCollection{}, err } cdcCol := cdcCollection{ @@ -636,34 +648,39 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg cloud: make([]*hivev1.ClusterDeploymentCustomization, 0), syntax: make([]*hivev1.ClusterDeploymentCustomization, 0), byCDCName: make(map[string]*hivev1.ClusterDeploymentCustomization), + namespace: make(map[string]*hivev1.ClusterDeploymentCustomization), + } + + for i, cdc := range cdcList.Items { + ref := &cdcList.Items[i] + cdcCol.namespace[cdc.Name] = ref } for _, item := range pool.Spec.Inventory { - missing := true - for i, cdc := range cdcList.Items { - ref := &cdcList.Items[i] - if cdc.Name != item.Name { - continue - } - missing = false - cdcCol.byCDCName[item.Name] = ref + if cdc, ok := cdcCol.namespace[item.Name]; ok { + cdcCol.byCDCName[item.Name] = cdc if cdRef := cdc.Status.ClusterDeploymentRef; cdRef == nil { - cdcCol.unassigned = append(cdcCol.unassigned, ref) + cdcCol.unassigned = append(cdcCol.unassigned, cdc) } else { - cdcCol.reserved = append(cdcCol.reserved, ref) + cdcCol.reserved = append(cdcCol.reserved, cdc) } - if cdc.Status.LastApplyStatus == hivev1.LastApplyBrokenCloud { - cdcCol.cloud = append(cdcCol.cloud, ref) + applyStatus := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) + if applyStatus == nil { + continue } - if cdc.Status.LastApplyStatus == hivev1.LastApplyBrokenSyntax { - cdcCol.syntax = append(cdcCol.cloud, ref) + if applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenCloud { + cdcCol.cloud = append(cdcCol.cloud, cdc) } - } - if missing { + if applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenSyntax { + cdcCol.syntax = append(cdcCol.cloud, cdc) + } + } else { cdcCol.missing = append(cdcCol.missing, item.Name) } } + cdcCol.Sort() + logger.WithFields(log.Fields{ "reservedCount": len(cdcCol.reserved), "unassignedCount": len(cdcCol.unassigned), @@ -675,6 +692,188 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg return &cdcCol, nil } +// Sort unassigned oldest successful customizations to avoid using the same broken +// customization. When customizations have the same last apply status, the +// oldest used customization will be prioritized. 
+func (cdcs *cdcCollection) Sort() { + sort.Slice( + cdcs.unassigned, + func(i, j int) bool { + iStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[i].Status.Conditions, hivev1.ApplySucceededCondition) + jStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[j].Status.Conditions, hivev1.ApplySucceededCondition) + iName := cdcs.unassigned[i].Name + jName := cdcs.unassigned[j].Name + if iStatus == nil { + iStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} + iStatus.LastTransitionTime = metav1.NewTime(time.Now()) + } + if jStatus == nil { + jStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} + jStatus.LastTransitionTime = metav1.NewTime(time.Now()) + } + iTime := iStatus.LastTransitionTime + jTime := jStatus.LastTransitionTime + if iStatus.Reason == jStatus.Reason { + if iTime.Equal(&jTime) { + // Sort by name to make this deterministic + return iName < jName + } + return iTime.Before(&jTime) + } + if iStatus.Reason == hivev1.CustomizationApplyReasonSucceeded { + return false + } + if jStatus.Reason == hivev1.CustomizationApplyReasonSucceeded { + return true + } + return iName < jName + }, + ) +} + +// Reserve +func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { + for i, cdci := range cdcs.unassigned { + if cdci.Name == cdc.Name { + reservationChanged := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Reserved", + Message: "reserved", + }) + + applyChanged := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: corev1.ConditionFalse, + Reason: hivev1.CustomizationApplyReasonInstallationPending, + Message: "Cluster installation in progress", + }) + + if reservationChanged || applyChanged { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err + } + } + + cdcs.byCDCName[cdc.Name] = cdc + cdcs.reserved = append(cdcs.reserved, cdc) + copy(cdcs.unassigned[i:], cdcs.unassigned[i+1:]) + cdcs.unassigned = cdcs.unassigned[:len(cdcs.unassigned)-1] + applyStatus := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) + if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenCloud { + cdcs.cloud = append(cdcs.cloud, cdc) + } + if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenSyntax { + cdcs.syntax = append(cdcs.cloud, cdc) + } + cdcs.Sort() + return nil + } + } + return fmt.Errorf("ClusterDeploymentCustomization %s is not reserved, but also was not found in the unassigned list; this is a bug", cdc.Name) +} + +// Available +func (cdcs *cdcCollection) Available(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { + for i, cdci := range cdcs.reserved { + if cdci.Name == cdc.Name { + cdc.Status.ClusterDeploymentRef = nil + cdc.Status.ClusterPoolRef = nil + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "available", + }) + + if changed { + if err := c.Status().Update(context.Background(), cdci); err != nil { + return err + } + } + + cdcs.byCDCName[cdc.Name] = cdc + cdcs.unassigned = append(cdcs.unassigned, cdc) + copy(cdcs.reserved[i:], cdcs.reserved[i+1:]) + 
cdcs.reserved = cdcs.reserved[:len(cdcs.reserved)-1]
+			applyStatus := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition)
+			if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenCloud {
+				cdcs.cloud = append(cdcs.cloud, cdc)
+			}
+			if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenSyntax {
+				cdcs.syntax = append(cdcs.syntax, cdc)
+			}
+			cdcs.Sort()
+			return nil
+		}
+	}
+	return fmt.Errorf("ClusterDeploymentCustomization %s could not be released because it was not found in the reserved list; this is a bug", cdc.Name)
+}
+
+// BrokenSyntax marks the CDC's ApplySucceeded condition as BrokenBySyntax and tracks it in the syntax-broken set.
+func (cdcs *cdcCollection) BrokenSyntax(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, msg string) {
+	for _, cdci := range cdcs.unassigned {
+		if cdci.Name == cdc.Name {
+			changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
+				Type:    hivev1.ApplySucceededCondition,
+				Status:  corev1.ConditionFalse,
+				Reason:  hivev1.CustomizationApplyReasonBrokenSyntax,
+				Message: msg,
+			})
+
+			if changed {
+				// Best effort: the caller surfaces the original patch error either way.
+				c.Status().Update(context.Background(), cdc)
+			}
+
+			cdcs.syntax = append(cdcs.syntax, cdc)
+		}
+	}
+}
+
+// BrokenCloud marks the reserved CDC's ApplySucceeded condition as BrokenByCloud after a failed provision.
+func (cdcs *cdcCollection) BrokenCloud(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
+	for _, cdci := range cdcs.reserved {
+		if cdci.Name == cdc.Name {
+			changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
+				Type:    hivev1.ApplySucceededCondition,
+				Status:  corev1.ConditionFalse,
+				Reason:  hivev1.CustomizationApplyReasonBrokenCloud,
+				Message: "Cluster installation failed. This may or may not be the fault of patches. Check the installation logs.",
+			})
+
+			if changed {
+				if err := c.Status().Update(context.Background(), cdc); err != nil {
+					return err
+				}
+			}
+
+			cdcs.cloud = append(cdcs.cloud, cdc)
+			return nil
+		}
+	}
+	return fmt.Errorf("ClusterDeploymentCustomization %s is broken by cloud, but was not found in the reserved list; this is a bug", cdc.Name)
+}
+
+// Succeeded marks the reserved CDC's ApplySucceeded condition as Succeeded once its cluster installs.
+func (cdcs *cdcCollection) Succeeded(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
+	for _, cdci := range cdcs.reserved {
+		if cdci.Name == cdc.Name {
+			changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
+				Type:    hivev1.ApplySucceededCondition,
+				Status:  corev1.ConditionTrue,
+				Reason:  hivev1.CustomizationApplyReasonSucceeded,
+				Message: "Patches applied and cluster installed successfully",
+			})
+
+			if changed {
+				if err := c.Status().Update(context.Background(), cdc); err != nil {
+					return err
+				}
+			}
+
+			return nil
+		}
+	}
+	return fmt.Errorf("ClusterDeploymentCustomization %s successfully applied, but was not found in the reserved list; this is a bug", cdc.Name)
+}
+
 // SyncClusterDeploymentCustomizationAssignments makes sure that the CDCs related to the pool, the pool inventory, and the related ClusterDeployments
 // are in the correct state.
func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c client.Client, pool *hivev1.ClusterPool, cds *cdCollection, logger log.FieldLogger) error {
@@ -682,87 +881,92 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien
 		return nil
 	}
 
-	contains := func(s []string, str string) bool {
-		for _, v := range s {
-			if v == str {
-				return true
-			}
-		}
-
-		return false
-	}
-
-	cdNames := []string{}
-	for cdName := range cds.byCDName {
-		cdNames = append(cdNames, cdName)
+	// Handle deletion of CDCs in the namespace: ensure active CDCs carry our
+	// finalizer, and drop it once a CDC being deleted is Available again.
+	for _, cdc := range cdcs.namespace {
+		if cdc.DeletionTimestamp == nil && !controllerutils.HasFinalizer(cdc, finalizer) {
+			controllerutils.AddFinalizer(cdc, finalizer)
+			if err := c.Update(context.Background(), cdc); err != nil {
+				return err
+			}
+		} else if cdc.DeletionTimestamp != nil && conditionsv1.IsStatusConditionTrue(cdc.Status.Conditions, conditionsv1.ConditionAvailable) {
+			controllerutils.DeleteFinalizer(cdc, finalizer)
+			if err := c.Update(context.Background(), cdc); err != nil {
+				return err
+			}
+		}
 	}
 
 	// If there is no CD, but CDC is reserved, then we release the CDC
 	for _, cdc := range cdcs.reserved {
-		if !contains(cdNames, cdc.Status.ClusterDeploymentRef.Name) {
-			if err := setCustomizationAvailabilityCondition(c, cdc, nil, logger); err != nil {
+		if cds.ByName(cdc.Status.ClusterDeploymentRef.Name) == nil {
+			if err := cdcs.Available(c, cdc); err != nil {
 				return err
 			}
 		}
 	}
 
 	// Make sure CD <=> CDC links are legit; repair them if not.
-	for _, cdc := range cdcs.unassigned {
-		for _, cd := range cds.byCDName {
-			if cd.Spec.ClusterPoolRef.CustomizationRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef.Name == cdc.Name {
-				if err := setCustomizationAvailabilityCondition(c, cdc, cd, logger); err != nil {
-					return err
-				}
-			}
-		}
-	}
+	for _, cd := range cds.byCDName {
+		// CD has CDC
+		cpRef := cd.Spec.ClusterPoolRef
+		if cpRef.CustomizationRef == nil {
+			continue
+		}
 
-	// Notice a Broken CD => update the CDC's LastApplyStatus to BrokenByCloud;
-	for _, cd := range cds.broken {
-		cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef
-		if cdcRef == nil {
+		// CDC exists
+		cdc, ok := cdcs.namespace[cpRef.CustomizationRef.Name]
+		if !ok {
 			continue
 		}
-		update := true
-		for _, cdc := range cdcs.cloud {
-			if cdc.Name == cdcRef.Name {
-				update = false
-				break
-			}
-		}
+		// CDC is not reserved
+		available := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable)
+		if available == nil || available.Status == corev1.ConditionTrue {
+			// Fix CDC availability
+			cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cd.Name}
+			cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: pool.Name}
+			if err := cdcs.Reserve(c, cdc); err != nil {
+				return err
+			}
+		}
+	}
 
-		if update {
-			cdc := cdcs.byCDCName[cd.Spec.ClusterPoolRef.CustomizationRef.Name]
-			cdcs.cloud = append(cdcs.cloud, cdc)
-			cdc.Status.LastApplyStatus = hivev1.LastApplyBrokenCloud
-			cdc.Status.LastApplyTime = metav1.Now()
-			if err := c.Status().Update(context.TODO(), cdc); err != nil {
+	// Notice a Broken CD => update the CDC's ApplySucceeded condition to BrokenByCloud;
+	for _, cd := range cds.Broken() {
+		cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef
+		if cdcRef == nil {
+			continue
+		}
+		if cdc := cdcs.byCDCName[cdcRef.Name]; cdc != nil {
+			if err := cdcs.BrokenCloud(c, cdc); err != nil {
 				return err
 			}
 		}
 	}
-	// Notice a CD has finished installing => update the CDC's LastApplyStatus to Success;
-	for _, cd := range cds.Unassigned(false) {
+
+	// Notice a CD has finished installing => update the CDC's ApplySucceeded condition to Success;
+	for _, cd := range 
cds.Installed() { if cd.Spec.ClusterPoolRef.CustomizationRef == nil { continue } if cdc, ok := cdcs.byCDCName[cd.Spec.ClusterPoolRef.CustomizationRef.Name]; ok { - if cdc.Status.LastApplyStatus != hivev1.LastApplySucceeded { - cdc.Status.LastApplyStatus = hivev1.LastApplySucceeded - cdc.Status.LastApplyTime = metav1.Now() - if err := c.Status().Update(context.TODO(), cdc); err != nil { - return err - } + if err := cdcs.Succeeded(c, cdc); err != nil { + return err } } } - // Update Cluster Pool Inventory Condition + if err := cdcs.UpdateInventoryValidCondition(c, pool); err != nil { + return err + } + + return nil +} + +// Update Cluster Pool Inventory Condition +func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool *hivev1.ClusterPool) error { message := "" status := corev1.ConditionTrue reason := hivev1.InventoryReasonValid if (len(cdcs.syntax) + len(cdcs.cloud) + len(cdcs.missing)) > 0 { + var _ json.Marshaler = &cdcCollection{} messageByte, err := json.Marshal(cdcs) if err != nil { return err @@ -800,6 +1004,9 @@ func (cdcs *cdcCollection) MarshalJSON() ([]byte, error) { for _, cdc := range cdcs.syntax { syntax = append(syntax, cdc.Name) } + sort.Strings(cloud) + sort.Strings(syntax) + sort.Strings(cdcs.missing) return json.Marshal(&struct { BrokenByCloud []string @@ -812,39 +1019,6 @@ func (cdcs *cdcCollection) MarshalJSON() ([]byte, error) { }) } -func setCustomizationAvailabilityCondition(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cd *hivev1.ClusterDeployment, logger log.FieldLogger) error { - status := corev1.ConditionTrue - reason := "Available" - message := "available" - cdc.Status.ClusterDeploymentRef = nil - cdc.Status.ClusterPoolRef = nil - - if cd != nil { - status = corev1.ConditionFalse - reason = "Reserved" - message = "reserved" - cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cd.Name} - cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: cd.Spec.ClusterPoolRef.PoolName} - } - - existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if existingCondition == nil || existingCondition.Reason != reason || existingCondition.Message != message { - conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: status, - Reason: reason, - Message: message, - }) - - if err := c.Status().Update(context.TODO(), cdc); err != nil { - logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") - return err - } - } - - return nil -} - // setCDsCurrentCondition idempotently sets the ClusterDeploymentsCurrent condition on the // ClusterPool according to whether all unassigned CDs have the same PoolVersion as the pool. 
func setCDsCurrentCondition(c client.Client, cds *cdCollection, clp *hivev1.ClusterPool, poolVersion string) error { diff --git a/pkg/controller/hibernation/hibernation_controller_test.go b/pkg/controller/hibernation/hibernation_controller_test.go index c9910f25520..a708f6ef763 100644 --- a/pkg/controller/hibernation/hibernation_controller_test.go +++ b/pkg/controller/hibernation/hibernation_controller_test.go @@ -26,7 +26,6 @@ import ( machineapi "github.com/openshift/api/machine/v1beta1" hivev1 "github.com/openshift/hive/apis/hive/v1" - hivev1openstack "github.com/openshift/hive/apis/hive/v1/openstack" hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1" "github.com/openshift/hive/pkg/constants" "github.com/openshift/hive/pkg/controller/hibernation/mock" @@ -1258,13 +1257,6 @@ func readyCondition(status corev1.ConditionStatus, reason string, lastTransition type clusterDeploymentOptions struct{} -func (*clusterDeploymentOptions) customized(cd *hivev1.ClusterDeployment) { - cd.Spec.Platform.OpenStack = &hivev1openstack.Platform{} - cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{ - CustomizationRef: &corev1.LocalObjectReference{Name: "cdc"}, - } -} - func (*clusterDeploymentOptions) notInstalled(cd *hivev1.ClusterDeployment) { cd.Spec.Installed = false } diff --git a/pkg/controller/utils/clusterdeployment.go b/pkg/controller/utils/clusterdeployment.go index f0590435e51..6f1e7d43a65 100644 --- a/pkg/controller/utils/clusterdeployment.go +++ b/pkg/controller/utils/clusterdeployment.go @@ -24,12 +24,6 @@ func IsFakeCluster(cd *hivev1.ClusterDeployment) bool { return fakeCluster && err == nil } -func IsOnpremCustomized(cd *hivev1.ClusterDeployment) bool { - customized := cd.Spec.ClusterPoolRef != nil - onprem := cd.Spec.Platform.OpenStack != nil - return customized && onprem -} - // IsClusterPausedOrRelocating checks if the syncing to the cluster is paused or if the cluster is relocating func IsClusterPausedOrRelocating(cd *hivev1.ClusterDeployment, logger log.FieldLogger) bool { if paused, err := strconv.ParseBool(cd.Annotations[constants.SyncsetPauseAnnotation]); err == nil && paused { diff --git a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go index 3ab7ab0e765..1fe66ad96d2 100644 --- a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go +++ b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go @@ -109,15 +109,31 @@ func WithPatch(path, op, value string) Option { } } -func WithLastStatus(status hivev1.LastApplyStatusType) Option { +func WithApplySucceeded(reason string, change time.Time) Option { return func(cdc *hivev1.ClusterDeploymentCustomization) { - cdc.Status.LastApplyStatus = status - } -} - -func WithLastTime(lastTime time.Time) Option { - return func(cdc *hivev1.ClusterDeploymentCustomization) { - cdc.Status.LastApplyTime = metav1.NewTime(lastTime) + status := corev1.ConditionTrue + if reason != hivev1.CustomizationApplyReasonSucceeded { + status = corev1.ConditionFalse + } + + if cdc.Status.Conditions == nil { + cdc.Status.Conditions = []conditionsv1.Condition{} + } + existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) + if existingCondition == nil { + newCondition := conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: status, + Reason: reason, + Message: reason, + } + newCondition.LastTransitionTime = metav1.NewTime(change) + 
cdc.Status.Conditions = append(cdc.Status.Conditions, newCondition) + } else { + existingCondition.LastTransitionTime = metav1.NewTime(change) + existingCondition.Status = status + existingCondition.Reason = reason + } } } diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go index bbeee804a2b..7f98c60a063 100644 --- a/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go @@ -8,7 +8,8 @@ import ( ) // SetStatusCondition sets the corresponding condition in conditions to newCondition. -func SetStatusCondition(conditions *[]Condition, newCondition Condition) { +// The return value indicates if this resulted in any changes *other than* LastHeartbeatTime. +func SetStatusCondition(conditions *[]Condition, newCondition Condition) bool { if conditions == nil { conditions = &[]Condition{} } @@ -17,22 +18,18 @@ func SetStatusCondition(conditions *[]Condition, newCondition Condition) { newCondition.LastTransitionTime = metav1.NewTime(time.Now()) newCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) *conditions = append(*conditions, newCondition) - return - } - - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + return true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message + changed := updateCondition(existingCondition, newCondition) existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) + return changed } // SetStatusConditionNoHearbeat sets the corresponding condition in conditions to newCondition // without setting lastHeartbeatTime. -func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) { +// The return value indicates if this resulted in any changes. +func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) bool { if conditions == nil { conditions = &[]Condition{} } @@ -40,16 +37,10 @@ func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Conditi if existingCondition == nil { newCondition.LastTransitionTime = metav1.NewTime(time.Now()) *conditions = append(*conditions, newCondition) - return + return true } - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } - - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message + return updateCondition(existingCondition, newCondition) } // RemoveStatusCondition removes the corresponding conditionType from conditions. 
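For reference, a minimal usage sketch of the new boolean return value. The caller below is hypothetical; it mirrors how the hive controllers above gate Status().Update on the changed flag to avoid no-op writes:

package example

import (
	"context"

	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
	hivev1 "github.com/openshift/hive/apis/hive/v1"
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// markReserved flips the Available condition on a CDC and only hits the API
// server when SetStatusConditionNoHeartbeat reports a real change.
func markReserved(ctx context.Context, c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
	changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
		Type:    conditionsv1.ConditionAvailable,
		Status:  corev1.ConditionFalse,
		Reason:  "Reserved",
		Message: "reserved",
	})
	if !changed {
		return nil // nothing to persist; skips a no-op status update
	}
	return c.Status().Update(ctx, cdc)
}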
@@ -67,6 +58,25 @@ func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) *conditions = newConditions } +func updateCondition(existingCondition *Condition, newCondition Condition) bool { + changed := false + if existingCondition.Status != newCondition.Status { + changed = true + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + if existingCondition.Reason != newCondition.Reason { + changed = true + existingCondition.Reason = newCondition.Reason + } + if existingCondition.Message != newCondition.Message { + changed = true + existingCondition.Message = newCondition.Message + } + return changed +} + // FindStatusCondition finds the conditionType in conditions. func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { for i := range conditions { diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go index a496bb9e2a8..6d280e9a7da 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -6,25 +6,20 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LastApplyStatusType indicates the status of the customization on the last -// applied cluster deployment. This is used for inventory sorting process to -// reduce the likelihood of using a broken customization repeatedly. -type LastApplyStatusType string - const ( - // LastApplySucceeded indicates that the customization + // CustomizationApplyReasonSucceeded indicates that the customization // worked properly on the last applied cluster deployment. - LastApplySucceeded LastApplyStatusType = "Succeeded" - // LastApplyBrokenSyntax indicates that Hive failed to apply + CustomizationApplyReasonSucceeded = "Succeeded" + // CustomizationApplyReasonBrokenSyntax indicates that Hive failed to apply // customization patches on install-config. More details would be found in // Valid condition message. - LastApplyBrokenSyntax LastApplyStatusType = "BrokenBySyntax" - // LastApplyBrokenCloud indicates that cluster deployment provision has failed + CustomizationApplyReasonBrokenSyntax = "BrokenBySyntax" + // CustomizationApplyReasonBrokenCloud indicates that cluster deployment provision has failed // when using this customization. More details would be found in the Valid condition message. - LastApplyBrokenCloud LastApplyStatusType = "BrokenByCloud" - // LastApplyInstallationPending indicates that the customization patches have + CustomizationApplyReasonBrokenCloud = "BrokenByCloud" + // CustomizationApplyReasonInstallationPending indicates that the customization patches have // been successfully applied but provisioning is not completed yet. - LastApplyInstallationPending LastApplyStatusType = "InstallationPending" + CustomizationApplyReasonInstallationPending = "InstallationPending" ) // +genclient @@ -74,13 +69,9 @@ type ClusterDeploymentCustomizationStatus struct { // +optional ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"` - // LastApplyTime indicates the time when the customization was applied on a cluster deployment. - // +optional - LastApplyTime metav1.Time `json:"lastApplyTime,omitempty"` - - // LastApplyStatus indicates the customization status in the last applied cluster deployment. 
+ // LastAppliedConfiguration contains the last applied patches to the install-config. // +optional - LastApplyStatus LastApplyStatusType `json:"lastApplyStatus,omitempty"` + LastAppliedConfiguration string `json:"lastAppliedConfiguration,omitempty"` // Conditions describes the state of the operator's reconciliation functionality. // +patchMergeKey=type @@ -90,7 +81,7 @@ type ClusterDeploymentCustomizationStatus struct { } const ( - ClusterDeploymentCustomizationValid conditionsv1.ConditionType = "Valid" + ApplySucceededCondition conditionsv1.ConditionType = "ApplySucceeded" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index a41584ab13e..ac12a798fc7 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -767,7 +767,11 @@ func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploym *out = new(corev1.LocalObjectReference) **out = **in } - in.LastApplyTime.DeepCopyInto(&out.LastApplyTime) + if in.ClusterPoolRef != nil { + in, out := &in.ClusterPoolRef, &out.ClusterPoolRef + *out = new(corev1.LocalObjectReference) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]conditionsv1.Condition, len(*in)) diff --git a/vendor/modules.txt b/vendor/modules.txt index 70757254c65..39fcfa387df 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -917,7 +917,7 @@ github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1 ## explicit; go 1.16 github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1 github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1beta1 -# github.com/openshift/custom-resource-status v1.1.2 +# github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 ## explicit; go 1.12 github.com/openshift/custom-resource-status/conditions/v1 # github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 From 9487de4d2bf9193dcf113a6ce929714d15496767 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Thu, 12 May 2022 17:29:11 +0300 Subject: [PATCH 08/27] Fixes based on latest review --- .../clusterdeploymentcustomization_types.go | 1 + .../clusterdeployment_controller.go | 15 +- .../clusterpool/clusterpool_controller.go | 43 ++- pkg/controller/clusterpool/collections.go | 324 ++++++++++-------- 4 files changed, 230 insertions(+), 153 deletions(-) diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go index 6d280e9a7da..71a6351445c 100644 --- a/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -70,6 +70,7 @@ type ClusterDeploymentCustomizationStatus struct { ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"` // LastAppliedConfiguration contains the last applied patches to the install-config. + // The information will retain for reference in case the customization is updated. 
// +optional LastAppliedConfiguration string `json:"lastAppliedConfiguration,omitempty"` diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 40fe36d98c6..88faab2770c 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -1442,13 +1442,14 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1 } func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { - if cpRef := cd.Spec.ClusterPoolRef; cpRef == nil || cpRef.CustomizationRef == nil { + cpRef := cd.Spec.ClusterPoolRef + if cpRef == nil || cpRef.CustomizationRef == nil { return nil } cdc := &hivev1.ClusterDeploymentCustomization{} - cdcNamespace := cd.Spec.ClusterPoolRef.Namespace - cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name + cdcNamespace := cpRef.Namespace + cdcName := cpRef.CustomizationRef.Name cdcLog := cdLog.WithField("customization", cdcName).WithField("namespace", cdcNamespace) err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: cdcNamespace, Name: cdcName}, cdc) if err != nil { @@ -1462,14 +1463,18 @@ func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDepl changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, + Status: corev1.ConditionTrue, Reason: "Available", Message: "available", }) - if changed { + if cdc.Status.ClusterPoolRef != nil || cdc.Status.ClusterDeploymentRef != nil { cdc.Status.ClusterPoolRef = nil cdc.Status.ClusterDeploymentRef = nil + changed = true + } + + if changed { if err := r.Status().Update(context.Background(), cdc); err != nil { cdcLog.WithError(err).Error("failed to update ClusterDeploymentCustomizationAvailable condition") return err diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index a09a938aa5b..8b282183033 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -174,13 +174,44 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc } // Watch for changes to ClusterDeploymentCustomizations - if err := c.Watch(&source.Kind{Type: &hivev1.ClusterDeploymentCustomization{}}, &handler.EnqueueRequestForObject{}); err != nil { + if err := c.Watch( + &source.Kind{Type: &hivev1.ClusterDeploymentCustomization{}}, + handler.EnqueueRequestsFromMapFunc( + requestsForCDCResources(r.Client, r.logger)), + ); err != nil { return err } return nil } +func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.MapFunc { + return func(o client.Object) []reconcile.Request { + _, ok := o.(*hivev1.ClusterDeploymentCustomization) + if !ok { + return nil + } + + cpList := &hivev1.ClusterPoolList{} + if err := c.List(context.Background(), cpList, client.InNamespace(o.GetNamespace())); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to list cluster pools for CDC resource") + return nil + } + + var requests []reconcile.Request + for _, cpl := range cpList.Items { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: cpl.Namespace, + Name: cpl.Name, + }, + }) + } + + return requests + } +} + func 
requestsForCDRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.MapFunc { return func(o client.Object) []reconcile.Request { if o.GetName() != resourceName { @@ -794,7 +825,7 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h installConfig, err := newPatch.Apply([]byte(secret.StringData["install-config.yaml"])) if err != nil { - cdcs.BrokenSyntax(r, cdc, fmt.Sprint(err)) + cdcs.BrokenBySyntax(r, cdc, fmt.Sprint(err)) cdcs.UpdateInventoryValidCondition(r, clp) return err } @@ -804,12 +835,14 @@ func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *h return err } - cdc.Status.LastAppliedConfiguration = string(configJson) - cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: clp.Name} - if err := cdcs.Reserve(r, cdc); err != nil { + if err := cdcs.Reserve(r, cdc, cd.Name, clp.Name); err != nil { + return err + } + if err := cdcs.InstallationPending(r, cdc); err != nil { return err } + cdc.Status.LastAppliedConfiguration = string(configJson) secret.StringData["install-config.yaml"] = string(installConfig) return nil } diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 6ae16a709f1..db3d0f80311 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -20,6 +20,7 @@ import ( hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/constants" controllerutils "github.com/openshift/hive/pkg/controller/utils" + apierrors "k8s.io/apimachinery/pkg/api/errors" ) type claimCollection struct { @@ -616,11 +617,11 @@ type cdcCollection struct { // Missing CDC means listed in pool inventory but the custom resource doesn't exist in the pool namespace missing []string // Used by some cluster deployment - reserved []*hivev1.ClusterDeploymentCustomization + reserved map[string]*hivev1.ClusterDeploymentCustomization // Last Cluster Deployment failed on provision - cloud []*hivev1.ClusterDeploymentCustomization + cloud map[string]*hivev1.ClusterDeploymentCustomization // Failed to apply patches for this cluster pool - syntax []*hivev1.ClusterDeploymentCustomization + syntax map[string]*hivev1.ClusterDeploymentCustomization // ByCDCName are all the CDCs listed in the pool inventory, the CR exists and are mapped by name byCDCName map[string]*hivev1.ClusterDeploymentCustomization // Namespace are all the CDC in the namespace mapped by name @@ -644,9 +645,9 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg cdcCol := cdcCollection{ unassigned: make([]*hivev1.ClusterDeploymentCustomization, 0), missing: make([]string, 0), - reserved: make([]*hivev1.ClusterDeploymentCustomization, 0), - cloud: make([]*hivev1.ClusterDeploymentCustomization, 0), - syntax: make([]*hivev1.ClusterDeploymentCustomization, 0), + reserved: make(map[string]*hivev1.ClusterDeploymentCustomization), + cloud: make(map[string]*hivev1.ClusterDeploymentCustomization), + syntax: make(map[string]*hivev1.ClusterDeploymentCustomization), byCDCName: make(map[string]*hivev1.ClusterDeploymentCustomization), namespace: make(map[string]*hivev1.ClusterDeploymentCustomization), } @@ -662,17 +663,17 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg if cdRef := cdc.Status.ClusterDeploymentRef; cdRef == nil { cdcCol.unassigned = append(cdcCol.unassigned, cdc) } else { - cdcCol.reserved = append(cdcCol.reserved, cdc) + cdcCol.reserved[item.Name] = cdc } applyStatus := 
conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) if applyStatus == nil { continue } if applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenCloud { - cdcCol.cloud = append(cdcCol.cloud, cdc) + cdcCol.cloud[item.Name] = cdc } if applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenSyntax { - cdcCol.syntax = append(cdcCol.cloud, cdc) + cdcCol.syntax[item.Name] = cdc } } else { cdcCol.missing = append(cdcCol.missing, item.Name) @@ -699,17 +700,18 @@ func (cdcs *cdcCollection) Sort() { sort.Slice( cdcs.unassigned, func(i, j int) bool { + now := metav1.NewTime(time.Now()) iStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[i].Status.Conditions, hivev1.ApplySucceededCondition) jStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[j].Status.Conditions, hivev1.ApplySucceededCondition) iName := cdcs.unassigned[i].Name jName := cdcs.unassigned[j].Name - if iStatus == nil { + if iStatus == nil || iStatus.Status == corev1.ConditionUnknown { iStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} - iStatus.LastTransitionTime = metav1.NewTime(time.Now()) + iStatus.LastTransitionTime = now } - if jStatus == nil { + if jStatus == nil || jStatus.Status == corev1.ConditionUnknown { jStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} - jStatus.LastTransitionTime = metav1.NewTime(time.Now()) + jStatus.LastTransitionTime = now } iTime := iStatus.LastTransitionTime jTime := jStatus.LastTransitionTime @@ -732,146 +734,142 @@ func (cdcs *cdcCollection) Sort() { } // Reserve -func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { +func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cdName, poolName string) error { + cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cdName} + cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: poolName} + + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Reserved", + Message: "reserved", + }) + + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err + } + } + + cdcs.reserved[cdc.Name] = cdc + cdcs.byCDCName[cdc.Name] = cdc + for i, cdci := range cdcs.unassigned { if cdci.Name == cdc.Name { - reservationChanged := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Reserved", - Message: "reserved", - }) - - applyChanged := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: hivev1.ApplySucceededCondition, - Status: corev1.ConditionFalse, - Reason: hivev1.CustomizationApplyReasonInstallationPending, - Message: "Cluster installation in progress", - }) - - if reservationChanged || applyChanged { - if err := c.Status().Update(context.Background(), cdc); err != nil { - return err - } - } - - cdcs.byCDCName[cdc.Name] = cdc - cdcs.reserved = append(cdcs.reserved, cdc) copy(cdcs.unassigned[i:], cdcs.unassigned[i+1:]) cdcs.unassigned = cdcs.unassigned[:len(cdcs.unassigned)-1] - applyStatus := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) - if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenCloud { - cdcs.cloud = 
append(cdcs.cloud, cdc) - } - if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenSyntax { - cdcs.syntax = append(cdcs.cloud, cdc) - } - cdcs.Sort() - return nil + break } } - return fmt.Errorf("ClusterDeploymentCustomization %s is not reserved, but also was not found in the unassigned list; this is a bug", cdc.Name) + + cdcs.Sort() + return nil } -// Available -func (cdcs *cdcCollection) Available(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { - for i, cdci := range cdcs.reserved { - if cdci.Name == cdc.Name { - cdc.Status.ClusterDeploymentRef = nil - cdc.Status.ClusterPoolRef = nil - changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionTrue, - Reason: "Available", - Message: "available", - }) - - if changed { - if err := c.Status().Update(context.Background(), cdci); err != nil { - return err - } - } +// Unassign +func (cdcs *cdcCollection) Unassign(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "available", + }) - cdcs.byCDCName[cdc.Name] = cdc - cdcs.unassigned = append(cdcs.unassigned, cdc) - copy(cdcs.reserved[i:], cdcs.reserved[i+1:]) - cdcs.reserved = cdcs.reserved[:len(cdcs.reserved)-1] - applyStatus := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) - if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenCloud { - cdcs.cloud = append(cdcs.cloud, cdc) - } - if applyStatus != nil && applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenSyntax { - cdcs.syntax = append(cdcs.cloud, cdc) - } - cdcs.Sort() - return nil + if cdc.Status.ClusterDeploymentRef != nil || cdc.Status.ClusterPoolRef != nil { + cdc.Status.ClusterDeploymentRef = nil + cdc.Status.ClusterPoolRef = nil + changed = true + } + + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err } } - return fmt.Errorf("ClusterDeploymentCustomization %s is not reserved, but also was not found in the unassigned list; this is a bug", cdc.Name) + + if _, ok := cdcs.reserved[cdc.Name]; ok { + delete(cdcs.reserved, cdc.Name) + } + + cdcs.unassigned = append(cdcs.unassigned, cdc) + cdcs.Sort() + return nil } -func (cdcs *cdcCollection) BrokenSyntax(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, msg string) { - for _, cdci := range cdcs.unassigned { - if cdci.Name == cdc.Name { - changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ - Type: hivev1.ApplySucceededCondition, - Status: corev1.ConditionFalse, - Reason: hivev1.CustomizationApplyReasonBrokenSyntax, - Message: msg, - }) - - if changed { - c.Status().Update(context.Background(), cdci) - } +func (cdcs *cdcCollection) BrokenBySyntax(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, msg string) error { + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: corev1.ConditionFalse, + Reason: hivev1.CustomizationApplyReasonBrokenSyntax, + Message: msg, + }) - cdcs.syntax = append(cdcs.syntax, cdc) + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err } } + + 
cdcs.syntax[cdc.Name] = cdc
+	delete(cdcs.cloud, cdc.Name)
+	return nil
 }
 
-func (cdcs *cdcCollection) BrokenCloud(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
-	for _, cdci := range cdcs.reserved {
-		if cdci.Name == cdc.Name {
-			changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
-				Type:    hivev1.ApplySucceededCondition,
-				Status:  corev1.ConditionFalse,
-				Reason:  hivev1.CustomizationApplyReasonBrokenCloud,
-				Message: "Cluster installation failed. This may or may not be the fault of patches. Check the installation logs.",
-			})
-
-			if changed {
-				if err := c.Status().Update(context.Background(), cdci); err != nil {
-					return err
-				}
-			}
+func (cdcs *cdcCollection) BrokenByCloud(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
+	changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
+		Type:    hivev1.ApplySucceededCondition,
+		Status:  corev1.ConditionFalse,
+		Reason:  hivev1.CustomizationApplyReasonBrokenCloud,
+		Message: "Cluster installation failed. This may or may not be the fault of patches. Check the installation logs.",
+	})
 
-			cdcs.cloud = append(cdcs.cloud, cdc)
-			return nil
+	if changed {
+		if err := c.Status().Update(context.Background(), cdc); err != nil {
+			return err
 		}
 	}
-	return fmt.Errorf("ClusterDeploymentCustomization %s is broken by cloud, but is not found in the unassigned list; this is a bug", cdc.Name)
+
+	cdcs.cloud[cdc.Name] = cdc
+	delete(cdcs.syntax, cdc.Name)
+	return nil
 }
 
 func (cdcs *cdcCollection) Succeeded(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
-	for _, cdci := range cdcs.reserved {
-		if cdci.Name == cdc.Name {
-			changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
-				Type:    hivev1.ApplySucceededCondition,
-				Status:  corev1.ConditionTrue,
-				Reason:  hivev1.CustomizationApplyReasonSucceeded,
-				Message: "Patches applied and cluster installed successfully",
-			})
-
-			if changed {
-				if err := c.Status().Update(context.Background(), cdci); err != nil {
-					return err
-				}
-			}
+	changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
+		Type:    hivev1.ApplySucceededCondition,
+		Status:  corev1.ConditionTrue,
+		Reason:  hivev1.CustomizationApplyReasonSucceeded,
+		Message: "Patches applied and cluster installed successfully",
+	})
 
-			return nil
+	if changed {
+		if err := c.Status().Update(context.Background(), cdc); err != nil {
+			return err
 		}
 	}
-	return fmt.Errorf("ClusterDeploymentCustomization %s successfuly applied, but is not found in the reserved list; this is a bug", cdc.Name)
+
+	delete(cdcs.syntax, cdc.Name)
+	delete(cdcs.cloud, cdc.Name)
+	return nil
+}
+
+func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
+	changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
+		Type:    hivev1.ApplySucceededCondition,
+		Status:  corev1.ConditionFalse,
+		Reason:  hivev1.CustomizationApplyReasonInstallationPending,
+		Message: "Patches applied; cluster installation in progress",
+	})
+
+	if changed {
+		if err := c.Status().Update(context.Background(), cdc); err != nil {
+			return err
+		}
+	}
+
+	delete(cdcs.syntax, cdc.Name)
+	delete(cdcs.cloud, cdc.Name)
+	return nil
+}
 
 // SyncClusterDeploymentCustomizations makes sure that CDCs relagted to the pool, the pool inventory and related ClusterDeployments
@@ -883,20 +881,33 @@ func (cdcs 
*cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien // Handle deletion of CDC in the namespace for _, cdc := range cdcs.namespace { - if cdc.DeletionTimestamp != nil && !controllerutils.HasFinalizer(cdc, finalizer) { - controllerutils.AddFinalizer(cdc, finalizer) - if err := c.Update(context.Background(), cdc); err != nil { - return err + isDeleted := cdc.DeletionTimestamp != nil + hasFinalizer := controllerutils.HasFinalizer(cdc, finalizer) + isAvailable := conditionsv1.IsStatusConditionTrue(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if isDeleted && isAvailable { + // We can delete the finalizer for a deleted CDC only if it is not reserved + if hasFinalizer { + controllerutils.DeleteFinalizer(cdc, finalizer) + if err := c.Update(context.Background(), cdc); err != nil { + return err + } + } + } else { + // Ensure the finalizer is present if the CDC is not deleted, OR if it is reserved + if !hasFinalizer { + controllerutils.AddFinalizer(cdc, finalizer) + if err := c.Update(context.Background(), cdc); err != nil { + return err + } } - } else if conditionsv1.IsStatusConditionTrue(cdc.Status.Conditions, conditionsv1.ConditionAvailable) { - controllerutils.DeleteFinalizer(cdc, finalizer) } } // If there is no CD, but CDC is reserved, then we release the CDC + // Last apply condition preserved for _, cdc := range cdcs.reserved { if cds.ByName(cdc.Status.ClusterDeploymentRef.Name) == nil { - if err := cdcs.Available(c, cdc); err != nil { + if err := cdcs.Unassign(c, cdc); err != nil { return err } } @@ -913,18 +924,39 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien // CDC exists cdc, ok := cdcs.namespace[cpRef.CustomizationRef.Name] if !ok { + logger.Warningf("CD %s has CDC %s that doesn't exist, this is a bug", cd.Name, cpRef.CustomizationRef.Name) continue } - // CDC is no reserved + // CDC is not reserved available := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if available == nil || available.Status == corev1.ConditionTrue { + if available == nil || available.Status == corev1.ConditionUnknown || available.Status == corev1.ConditionTrue { + // CDC is used by other CD + if cdRef := cdc.Status.ClusterDeploymentRef; cdRef != nil && cdRef.Name != cd.Name { + cdOther := &hivev1.ClusterDeployment{} + if err := c.Get( + context.Background(), + client.ObjectKey{Name: cdRef.Name, Namespace: cdRef.Name}, + cdOther, + ); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } else { + // Fixing reservation should be done by the appropriate cluster pool + continue + } + } // Fix CDC availability - cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cd.Name} - cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: pool.Name} - if err := cdcs.Reserve(c, cdc); err != nil { + if err := cdcs.Reserve(c, cdc, cd.Name, pool.Name); err != nil { return err } + + if cd.Spec.Installed { + cdcs.Succeeded(c, cdc) + } else { + cdcs.InstallationPending(c, cdc) + } } } @@ -934,22 +966,27 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien if cdcRef == nil { continue } - if cdc := cdcs.byCDCName[cdcRef.Name]; cdc != nil { - if err := cdcs.BrokenCloud(c, cdc); err != nil { + if cdc, ok := cdcs.byCDCName[cdcRef.Name]; ok { + if err := cdcs.BrokenByCloud(c, cdc); err != nil { return err } + } else { + logger.Warningf("CD %s has CDC %s that doesn't exist, this is a bug", cd.Name, cdcRef.Name) } } // Notice a CD has finished installing => update 
the CDC's ApplySucceeded condition to Success;
 	for _, cd := range cds.Installed() {
-		if cd.Spec.ClusterPoolRef.CustomizationRef == nil {
+		cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef
+		if cdcRef == nil {
 			continue
 		}
-		if cdc, ok := cdcs.byCDCName[cd.Spec.ClusterPoolRef.CustomizationRef.Name]; ok {
+		if cdc, ok := cdcs.byCDCName[cdcRef.Name]; ok {
 			if err := cdcs.Succeeded(c, cdc); err != nil {
 				return err
 			}
+		} else {
+			logger.Warningf("CD %s has CDC %s that doesn't exist, this is a bug", cd.Name, cdcRef.Name)
 		}
 	}
 
@@ -966,7 +1003,6 @@ func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool *
 	status := corev1.ConditionTrue
 	reason := hivev1.InventoryReasonValid
 	if (len(cdcs.syntax) + len(cdcs.cloud) + len(cdcs.missing)) > 0 {
-		var _ json.Marshaler = &cdcCollection{}
 		messageByte, err := json.Marshal(cdcs)
 		if err != nil {
 			return err
@@ -995,6 +1031,8 @@ func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool *
 	return nil
 }
 
+var _ json.Marshaler = &cdcCollection{}
+
 func (cdcs *cdcCollection) MarshalJSON() ([]byte, error) {
 	cloud := []string{}
 	for _, cdc := range cdcs.cloud {

From f4d21fcc8f082193134beef0fa0910bc2a81ae7f Mon Sep 17 00:00:00 2001
From: Alexander Braverman Masis
Date: Sun, 15 May 2022 14:52:53 +0300
Subject: [PATCH 09/27] All comments applied

---
 apis/hive/v1/clusterdeployment_types.go       |   4 -
 ...ft.io_clusterdeploymentcustomizations.yaml |   3 +-
 go.sum                                        |   1 -
 hack/app-sre/saas-template.yaml               |   3 +-
 .../clusterdeployment_controller.go           |  21 +-
 .../clusterdeployment_controller_test.go      |  33 +++
 .../clusterpool/clusterpool_controller.go     |  24 +-
 .../clusterpool_controller_test.go            | 210 ++++++++++++------
 pkg/controller/clusterpool/collections.go     |  65 ++++--
 .../hibernation_controller_test.go            |   1 -
 ...customization_validating_admission_hook.go |  17 +-
 .../clusterpool_validating_admission_hook.go  |  16 --
 .../apis/hive/v1/clusterdeployment_types.go   |   4 -
 .../clusterdeploymentcustomization_types.go   |   1 +
 14 files changed, 260 insertions(+), 143 deletions(-)

diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go
index e83976cf855..07eaa045b8f 100644
--- a/apis/hive/v1/clusterdeployment_types.go
+++ b/apis/hive/v1/clusterdeployment_types.go
@@ -52,10 +52,6 @@ const (
 	// FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster
 	// secret before cleaning up the API object.
 	FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster"
-
-	// FinalizerCustomizationRelease is used on ClusterDeployments to ensure we run a successful
-	// release of ClusterDeploymentCustomization
-	FinalizerCustomizationRelease string = "hive.openshift.io/customization"
 )
 
 // ClusterPowerState is used to indicate whether a cluster is running or in a
diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
index 04660f92b74..776c1e1ef28 100644
--- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
+++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml
@@ -116,7 +116,8 @@ spec:
             type: array
           lastAppliedConfiguration:
             description: LastAppliedConfiguration contains the last applied patches
-              to the install-config.
+              to the install-config. The information is retained for reference
+              in case the customization is updated.
type: string
         type: object
     required:
diff --git a/go.sum b/go.sum
index 0e7055eda69..ce620292034 100644
--- a/go.sum
+++ b/go.sum
@@ -1046,7 +1046,6 @@ github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850d
 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519/go.mod h1:C7unCUThP8eqT4xQfbvg3oIDn2S9TYtb0wbBoH/SR2U=
 github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551 h1:nGa6igwzG7smZOACUsovgf9XG8vT96Zdyc4H6r2rqS0=
 github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551/go.mod h1:72ieWchfTx9U7UbQO47vhSXBoCi2IJGZhXoCezan4EM=
-github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
 github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ=
 github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
 github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 h1:y47BAJFepK8Xls1c+quIOyc46OXiT9LRiqGVjIaMlSA=
diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml
index 49c2bccd3ef..8f85e17d0f9 100644
--- a/hack/app-sre/saas-template.yaml
+++ b/hack/app-sre/saas-template.yaml
@@ -370,7 +370,8 @@ objects:
             type: array
           lastAppliedConfiguration:
             description: LastAppliedConfiguration contains the last applied
-              patches to the install-config.
+              patches to the install-config. The information is retained for
+              reference in case the customization is updated.
             type: string
         type: object
     required:
diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
index 88faab2770c..499d659fefc 100644
--- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go
+++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go
@@ -563,25 +563,15 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
 	}
 
 	if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
-		cdLog.Debug("adding clusterdeployment deprovision finalizer")
-		if err := r.addClusterDeploymentFinalizer(cd, hivev1.FinalizerDeprovision); err != nil {
-			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding deprovision finalizer")
+		cdLog.Debugf("adding clusterdeployment finalizer")
+		if err := r.addClusterDeploymentFinalizer(cd); err != nil {
+			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer")
 			return reconcile.Result{}, err
 		}
 		metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
 		return reconcile.Result{}, nil
 	}
 
-	if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) {
-		cdLog.Debug("adding clusterdeployment customization release finalizer")
-		if err := r.addClusterDeploymentFinalizer(cd, hivev1.FinalizerCustomizationRelease); err != nil {
-			cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding customization finalizer")
-			return reconcile.Result{}, err
-		}
-
-		return reconcile.Result{}, nil
-	}
-
 	if cd.Spec.ManageDNS {
 		updated, result, err := r.ensureManagedDNSZone(cd, cdLog)
 		if updated || err != nil {
@@ -1416,10 +1406,9 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu
 	}
 }
 
-func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd 
*hivev1.ClusterDeployment, finalizer string) error { +func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error { cd = cd.DeepCopy() - controllerutils.AddFinalizer(cd, finalizer) - + controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision) return r.Update(context.TODO(), cd) } diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go index c108fb9b096..fabfcddc3ea 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go @@ -1876,6 +1876,32 @@ func TestClusterDeploymentReconcile(t *testing.T) { require.Nil(t, cd, "expected ClusterDeployment to be deleted") }, }, + { + name: "release customization on deprovision", + existing: []runtime.Object{ + testClusterDeploymentCustomization("cdc"), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithInitializedConditions(testClusterDeployment()) + cd.Spec.Installed = true + cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{ + Namespace: testNamespace, + CustomizationRef: &corev1.LocalObjectReference{Name: "cdc"}, + } + now := metav1.Now() + cd.DeletionTimestamp = &now + return cd + }(), + testclusterdeprovision.Build( + testclusterdeprovision.WithNamespace(testNamespace), + testclusterdeprovision.WithName(testName), + testclusterdeprovision.Completed(), + ), + }, + validate: func(c client.Client, t *testing.T) { + cd := getCD(c) + require.Nil(t, cd, "expected ClusterDeployment to be deleted") + }, + }, { name: "deprovision finished", existing: []runtime.Object{ @@ -3213,6 +3239,13 @@ func testClusterDeploymentWithInitializedConditions(cd *hivev1.ClusterDeployment return cd } +func testClusterDeploymentCustomization(name string) *hivev1.ClusterDeploymentCustomization { + cdc := &hivev1.ClusterDeploymentCustomization{} + cdc.Name = name + cdc.Namespace = testNamespace + return cdc +} + func testInstalledClusterDeployment(installedAt time.Time) *hivev1.ClusterDeployment { cd := testClusterDeployment() cd.Spec.Installed = true diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 8b282183033..87fe1e4ef5f 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -48,7 +48,6 @@ const ( icSecretDependent = "install config template secret" cdClusterPoolIndex = "spec.clusterpool.namespacedname" claimClusterPoolIndex = "spec.clusterpoolname" - defaultInventoryAttempts = 5 ) var ( @@ -329,6 +328,7 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. if err != nil { return reconcile.Result{}, err } + claims, err := getAllClaimsForPool(r.Client, clp, logger) if err != nil { return reconcile.Result{}, err @@ -462,6 +462,7 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, nil } +// reconcileRunningClusters ensures the oldest unassigned clusters are set to running, and the // remainder are set to hibernating. The number of clusters we set to running is determined by // adding the cluster's configured runningCount to the number of unsatisfied claims for which we're // spinning up new clusters. 
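
As a reading aid for the comment above: a minimal sketch of the running-count arithmetic it describes, with plain integer inputs. The real controller derives these counts from its claim and ClusterDeployment collections, so desiredRunning below is an illustrative stand-in, not code from this series.

	func desiredRunning(runningCount, claimsBeingSatisfied, totalUnassigned int) (running, hibernating int) {
		// Keep runningCount clusters warm, plus one running cluster for each
		// unsatisfied claim a new cluster is currently being spun up for.
		running = runningCount + claimsBeingSatisfied
		if running > totalUnassigned {
			running = totalUnassigned // cannot run more clusters than exist
		}
		return running, totalUnassigned - running
	}
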
@@ -762,6 +763,7 @@ func (r *ReconcileClusterPool) createCluster( var cd *hivev1.ClusterDeployment var secret *corev1.Secret var cdPos int + // Add the ClusterPoolRef to the ClusterDeployment, and move it to the end of the slice. for i, obj := range objs { if cdTmp, ok := obj.(*hivev1.ClusterDeployment); ok { cd = cdTmp @@ -794,6 +796,16 @@ func (r *ReconcileClusterPool) createCluster( return cd, nil } +func isInstallConfigSecret(obj interface{}) *corev1.Secret { + if secret, ok := obj.(*corev1.Secret); ok { + _, ok := secret.StringData["install-config.yaml"] + if ok { + return secret + } + } + return nil +} + // patchInstallConfig responsible for applying ClusterDeploymentCustomization and its reservation func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *hivev1.ClusterDeployment, secret *corev1.Secret, cdcs *cdcCollection, logger log.FieldLogger) error { if clp.Spec.Inventory == nil { @@ -1148,13 +1160,3 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg return nil, errors.New("unsupported platform") } } - -func isInstallConfigSecret(obj interface{}) *corev1.Secret { - if secret, ok := obj.(*corev1.Secret); ok { - _, ok := secret.StringData["install-config.yaml"] - if ok { - return secret - } - } - return nil -} diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index 2fda5efe395..43d0dcb2641 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -81,13 +81,16 @@ func TestReconcileClusterPool(t *testing.T) { }), ) - inventoryPoolBuilder := initializedPoolBuilder.Options( - testcp.WithInventory([]string{"test-cdc-1"}), - testcp.WithCondition(hivev1.ClusterPoolCondition{ - Status: corev1.ConditionUnknown, - Type: hivev1.ClusterPoolInventoryValidCondition, - }), - ) + inventoryPoolVersion := "e0bc44f74a546c63" + inventoryPoolBuilder := func() testcp.Builder { + return initializedPoolBuilder.Options( + testcp.WithInventory([]string{"test-cdc-1"}), + testcp.WithCondition(hivev1.ClusterPoolCondition{ + Status: corev1.ConditionUnknown, + Type: hivev1.ClusterPoolInventoryValidCondition, + }), + ) + } cdBuilder := func(name string) testcd.Builder { return testcd.FullBuilder(name, name, scheme).Options( @@ -108,8 +111,8 @@ func TestReconcileClusterPool(t *testing.T) { existing []runtime.Object noClusterImageSet bool noCredsSecret bool - inventory bool expectError bool + expectInventory bool expectedTotalClusters int expectedObservedSize int32 expectedObservedReady int32 @@ -130,10 +133,10 @@ func TestReconcileClusterPool(t *testing.T) { // Map, keyed by claim name, of expected Status.Conditions['Pending'].Reason. // (The clusterpool controller always sets this condition's Status to True.) // Not checked if nil. 
- expectedClaimPendingReasons map[string]string - expectedInventoryNext string - expectPoolVersionChanged bool - expectedPoolVersion string + expectedClaimPendingReasons map[string]string + expectedInventoryAssignmentOrder []string + expectPoolVersionChanged bool + expectedPoolVersion string }{ { name: "initialize conditions", @@ -194,115 +197,169 @@ func TestReconcileClusterPool(t *testing.T) { existing: []runtime.Object{ initializedPoolBuilder.Build(testcp.WithInventory([]string{"test-cdc-1"})), }, - inventory: true, expectPoolVersionChanged: true, }, { name: "poolVersion doens't change with existing inventory when entry added", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithInventory([]string{"test-cdc-1", "test-cdc-2"})), + inventoryPoolBuilder().Build(testcp.WithInventory([]string{"test-cdc-1", "test-cdc-2"})), }, - inventory: true, expectPoolVersionChanged: false, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, }, { name: "poolVersion changes when inventory removed", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithInventory([]string{})), + inventoryPoolBuilder().Build(testcp.WithInventory([]string{})), }, - inventory: false, expectPoolVersionChanged: true, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, }, { name: "cp with inventory and cdc exists is valid", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithSize(1)), + inventoryPoolBuilder().Build(testcp.WithSize(1)), testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), }, - inventory: true, expectedTotalClusters: 1, expectedObservedSize: 0, expectedObservedReady: 0, expectedInventoryVaildStatus: corev1.ConditionTrue, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, expectedAssignedCDCs: 1, }, + { + name: "cp with inventory and available cdc deleted without hold", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder( + testNamespace, "test-cdc-1", scheme, + ).GenericOptions(testgeneric.Deleted()).Build(), + }, + expectedTotalClusters: 0, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectInventory: true, + expectError: true, + expectedCDCurrentStatus: corev1.ConditionUnknown, + }, + { + name: "cp with inventory and available cdc with finalizer deleted without hold", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder( + testNamespace, "test-cdc-1", scheme, + ).GenericOptions( + testgeneric.Deleted(), + testgeneric.WithFinalizer(finalizer), + ).Build(), + }, + expectedTotalClusters: 0, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectInventory: true, + expectError: true, + expectedCDCurrentStatus: corev1.ConditionUnknown, + }, + { + name: "cp with inventory and reserved cdc deletion on hold", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcd.FullBuilder("c1", "c1", scheme).Build( + testcd.WithPoolVersion(inventoryPoolVersion), + testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "claim"), + testcd.WithCustomization("test-cdc-1"), + testcd.Running(), + ), + testcdc.FullBuilder( + testNamespace, "test-cdc-1", scheme, + ).GenericOptions( + testgeneric.Deleted(), + testgeneric.WithFinalizer(finalizer), + ).Build( + testcdc.Reserved(), + ), + }, + expectedTotalClusters: 2, + expectedRunning: 1, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectInventory: true, + expectError: false, + expectedCDCurrentStatus: 
corev1.ConditionTrue, + expectedAssignedCDs: 1, + expectedUnassignedClaims: 0, + expectedAssignedCDCs: 1, + }, { name: "cp with inventory and cdc doesn't exist is not valid - missing", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithSize(1)), + inventoryPoolBuilder().Build(testcp.WithSize(1)), }, - inventory: true, expectedTotalClusters: 0, expectedObservedSize: 0, expectedObservedReady: 0, expectedInventoryVaildStatus: corev1.ConditionFalse, expectedInventoryMessage: map[string][]string{"Missing": {"test-cdc-1"}}, expectedCDCurrentStatus: corev1.ConditionUnknown, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, expectError: true, }, { name: "cp with inventory and cdc patch broken is not valid - BrokenBySyntax", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithSize(1)), + inventoryPoolBuilder().Build(testcp.WithSize(1)), testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build( testcdc.WithPatch("/broken/path", "replace", "x"), ), }, - inventory: true, expectedTotalClusters: 0, expectedObservedSize: 0, expectedObservedReady: 0, expectedInventoryVaildStatus: corev1.ConditionFalse, expectedInventoryMessage: map[string][]string{"BrokenBySyntax": {"test-cdc-1"}}, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, expectedCDCurrentStatus: corev1.ConditionUnknown, expectError: true, }, { name: "cp with inventory and cd provisioning failed is not valid - BrokenByCloud", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithSize(1)), + inventoryPoolBuilder().Build(testcp.WithSize(1)), testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), testcd.FullBuilder("c1", "c1", scheme).Build( - testcd.WithPoolVersion("06983eaafac7f695"), + testcd.WithPoolVersion("e0bc44f74a546c63"), testcd.WithUnclaimedClusterPoolReference(testNamespace, testLeasePoolName), testcd.WithCustomization("test-cdc-1"), testcd.Broken(), ), }, - inventory: true, expectedTotalClusters: 0, expectedObservedSize: 1, expectedObservedReady: 0, expectedInventoryVaildStatus: corev1.ConditionFalse, expectedInventoryMessage: map[string][]string{"BrokenByCloud": {"test-cdc-1"}}, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, expectedAssignedCDCs: 1, }, { name: "cp with inventory and good cdc is valid, cd created", existing: []runtime.Object{ - inventoryPoolBuilder.Build(testcp.WithSize(1)), + inventoryPoolBuilder().Build(testcp.WithSize(1)), testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), unclaimedCDBuilder("c1").Build( testcd.WithCustomization("test-cdc-1"), testcd.Running(), ), }, - inventory: true, expectedTotalClusters: 0, expectedObservedSize: 1, expectedObservedReady: 1, expectedInventoryVaildStatus: corev1.ConditionTrue, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, expectedAssignedCDCs: 1, }, { - // Second prioritize by last used name: "cp with inventory - correct prioritization - same status", existing: []runtime.Object{ initializedPoolBuilder.Build( @@ -314,14 +371,13 @@ func TestReconcileClusterPool(t *testing.T) { ), testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build(), }, - expectedTotalClusters: 1, - expectedInventoryVaildStatus: corev1.ConditionTrue, - expectedPoolVersion: "06983eaafac7f695", - expectedInventoryNext: "test-cdc-successful-old", - expectedAssignedCDCs: 1, + expectedTotalClusters: 1, + expectedInventoryVaildStatus: corev1.ConditionTrue, + expectInventory: true, + expectedInventoryAssignmentOrder: 
[]string{"test-cdc-successful-old"}, + expectedAssignedCDCs: 1, }, { - // First prioritize unused/successful vs broken (cloud/syntax) name: "cp with inventory - correct prioritization - successful vs broken", existing: []runtime.Object{ initializedPoolBuilder.Build( @@ -329,22 +385,22 @@ func TestReconcileClusterPool(t *testing.T) { testcp.WithInventory([]string{"test-cdc-successful-new", "test-cdc-broken-old"}), ), testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( - testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonBrokenCloud, nowish.Add(-time.Hour)), ), testcdc.FullBuilder(testNamespace, "test-cdc-successful-new", scheme).Build( - testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonBrokenCloud, nowish), + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish), ), }, - expectedTotalClusters: 1, - expectedInventoryVaildStatus: corev1.ConditionFalse, - expectedPoolVersion: "06983eaafac7f695", - expectedInventoryNext: "test-cdc-successful-new", - expectedAssignedCDCs: 1, + expectedTotalClusters: 1, + expectedInventoryVaildStatus: corev1.ConditionFalse, + expectInventory: true, + expectedInventoryAssignmentOrder: []string{"test-cdc-successful-new"}, + expectedAssignedCDCs: 1, }, { name: "cp with inventory - release cdc when cd is missing", existing: []runtime.Object{ - inventoryPoolBuilder.Build( + inventoryPoolBuilder().Build( testcp.WithSize(1), testcp.WithInventory([]string{"test-cdc-1"}), ), @@ -356,13 +412,13 @@ func TestReconcileClusterPool(t *testing.T) { ), }, expectedTotalClusters: 1, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, expectedAssignedCDCs: 1, // The CDC will be assigned to a new cluster }, { name: "cp with inventory - fix cdc when cd reference exists", existing: []runtime.Object{ - inventoryPoolBuilder.Build( + inventoryPoolBuilder().Build( testcp.WithSize(1), testcp.WithInventory([]string{"test-cdc-1"}), ), @@ -376,7 +432,7 @@ func TestReconcileClusterPool(t *testing.T) { }, expectedTotalClusters: 1, expectedObservedSize: 1, - expectedPoolVersion: "06983eaafac7f695", + expectInventory: true, expectedAssignedCDCs: 1, expectedCDCurrentStatus: corev1.ConditionUnknown, }, @@ -1632,7 +1688,11 @@ func TestReconcileClusterPool(t *testing.T) { ) } if test.expectedPoolVersion == "" { - test.expectedPoolVersion = initialPoolVersion + if test.expectInventory { + test.expectedPoolVersion = inventoryPoolVersion + } else { + test.expectedPoolVersion = initialPoolVersion + } } fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(test.existing...).Build() logger := log.New() @@ -1657,8 +1717,6 @@ func TestReconcileClusterPool(t *testing.T) { } else { assert.NoError(t, err, "expected no error from reconcile") } - cdcs := &hivev1.ClusterDeploymentCustomizationList{} - fakeClient.List(context.Background(), cdcs) pool := &hivev1.ClusterPool{} err = fakeClient.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeasePoolName}, pool) @@ -1805,20 +1863,32 @@ func TestReconcileClusterPool(t *testing.T) { assert.Equal(t, test.expectedUnassignedClaims, actualUnassignedClaims, "unexpected number of unassigned claims") actualAssignedCDCs := 0 - // cdcs := &hivev1.ClusterDeploymentCustomizationList{} + cdcs := &hivev1.ClusterDeploymentCustomizationList{} err = fakeClient.List(context.Background(), cdcs) require.NoError(t, err) + cdcMap := 
make(map[string]hivev1.ClusterDeploymentCustomization, len(cdcs.Items))
 		for _, cdc := range cdcs.Items {
-			if test.expectedInventoryNext != "" {
-				if cdc.Status.ClusterPoolRef != nil && cdc.Status.ClusterPoolRef.Name == testLeasePoolName {
-					assert.Equal(t, test.expectedInventoryNext, cdc.Name, "unexpected inventory next")
-				}
-			}
+			cdcMap[cdc.Name] = cdc
+
 			condition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable)
 			if condition != nil && condition.Status == corev1.ConditionFalse {
 				actualAssignedCDCs++
 			}
 		}
+
+		if order := test.expectedInventoryAssignmentOrder; order != nil && len(order) > 0 {
+			lastTime := metav1.NewTime(nowish.Add(24 * -time.Hour))
+			for _, cdcName := range order {
+				cdc := cdcMap[cdcName]
+				condition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable)
+				if condition == nil || condition.Status == corev1.ConditionUnknown || condition.Status == corev1.ConditionTrue {
+					assert.Failf(t, "expected CDC %s to be assigned", cdcName)
+				}
+				assert.True(t, lastTime.Before(&condition.LastTransitionTime), "expected %s to be before %s", lastTime, condition.LastTransitionTime)
+				lastTime = condition.LastTransitionTime
+			}
+		}
+
 		assert.Equal(t, test.expectedAssignedCDCs, actualAssignedCDCs, "unexpected number of assigned CDCs")
 		})
 	}
@@ -2579,6 +2649,8 @@ func TestReconcileRBAC(t *testing.T) {
 func Test_isBroken(t *testing.T) {
 	logger := log.New()
 
+	basicPool := hivev1.ClusterPool{}
+
 	poolNoHibernationConfig := hivev1.ClusterPool{
 		Spec: hivev1.ClusterPoolSpec{},
 	}
@@ -2772,6 +2844,20 @@
 		pool: &poolWithTimeout,
 		want: true,
 	},
+	{
+		name: "ClusterDeploymentCustomization was removed",
+		cd: testcd.BasicBuilder().Options(
+			testcd.WithCondition(
+				hivev1.ClusterDeploymentCondition{
+					Type:   hivev1.ProvisionStoppedCondition,
+					Status: corev1.ConditionFalse,
+				}),
+			testcd.WithClusterPoolReference(testNamespace, "cp", ""),
+			testcd.WithCustomization("test-cdc-1"),
+		).Build(),
+		pool: &basicPool,
+		want: true,
+	},
 }
 for _, tt := range tests {
 	t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go
index db3d0f80311..45e139dde0e 100644
--- a/pkg/controller/clusterpool/collections.go
+++ b/pkg/controller/clusterpool/collections.go
@@ -231,6 +231,8 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log
 		logger.Infof("Cluster %s is broken due to ProvisionStopped", cd.Name)
 		return true
 	}
+
+	// Check if CD's customization exists in CP inventory
 	if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil {
 		customizationExists := false
 		cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name
@@ -240,7 +242,11 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log
 		}
 	}
 	if !customizationExists {
-		logger.Infof("Cluster %s is broken due to removed customization %s", cd.Name, cdcName)
+		logger.WithFields(log.Fields{
+			"ClusterDeployment":              cd.Name,
+			"ClusterDeploymentCustomization": cdcName,
+			"ClusterPool":                    pool.Name,
+		}).Info("Cluster is broken due to a customization removed from the pool's inventory")
 		return true
 	}
 	}
@@ -638,7 +644,7 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg
 	if err := c.List(
 		context.Background(), cdcList, client.InNamespace(pool.Namespace)); err != nil {
-		logger.WithError(err).Error("error listing ClusterDeploymentCustomizations")
+		logger.WithField("namespace", 
pool.Namespace).WithError(err).Error("error listing ClusterDeploymentCustomizations") return &cdcCollection{}, err } @@ -683,11 +689,11 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg cdcCol.Sort() logger.WithFields(log.Fields{ - "reservedCount": len(cdcCol.reserved), "unassignedCount": len(cdcCol.unassigned), + "missingCount": len(cdcCol.missing), + "reservedCount": len(cdcCol.reserved), "brokenByCloudCount": len(cdcCol.cloud), "brokenBySyntaxCount": len(cdcCol.syntax), - "missingCount": len(cdcCol.missing), }).Debug("found ClusterDeploymentCustomizations for ClusterPool") return &cdcCol, nil @@ -723,17 +729,16 @@ func (cdcs *cdcCollection) Sort() { return iTime.Before(&jTime) } if iStatus.Reason == hivev1.CustomizationApplyReasonSucceeded { - return false + return true } if jStatus.Reason == hivev1.CustomizationApplyReasonSucceeded { - return true + return false } return iName < jName }, ) } -// Reserve func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cdName, poolName string) error { cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cdName} cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: poolName} @@ -766,7 +771,6 @@ func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymen return nil } -// Unassign func (cdcs *cdcCollection) Unassign(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ Type: conditionsv1.ConditionAvailable, @@ -872,8 +876,13 @@ func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.Clus return nil } -// SyncClusterDeploymentCustomizations makes sure that CDCs relagted to the pool, the pool inventory and related ClusterDeployments -// are in the correct state. +// SyncClusterDeploymentCustomizations updates CDCs and related CR status: +// - Handle deletion of CDC in the namespace +// - If there is no CD, but CDC is reserved, then we release the CDC +// - Make sure that CD <=> CDC links are legit; repair them if not. 
+// - Notice a Broken CD => update the CDC's ApplySucceeded condition to BrokenByCloud; +// - Notice a CD has finished installing => update the CDC's ApplySucceeded condition to Success; +// - Update ClusterPool InventoryValid condition func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c client.Client, pool *hivev1.ClusterPool, cds *cdCollection, logger log.FieldLogger) error { if pool.Spec.Inventory == nil { return nil @@ -883,8 +892,8 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien for _, cdc := range cdcs.namespace { isDeleted := cdc.DeletionTimestamp != nil hasFinalizer := controllerutils.HasFinalizer(cdc, finalizer) - isAvailable := conditionsv1.IsStatusConditionTrue(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if isDeleted && isAvailable { + isAvailable := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if isDeleted && (isAvailable == nil || isAvailable.Status != corev1.ConditionFalse) { // We can delete the finalizer for a deleted CDC only if it is not reserved if hasFinalizer { controllerutils.DeleteFinalizer(cdc, finalizer) @@ -904,7 +913,6 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien } // If there is no CD, but CDC is reserved, then we release the CDC - // Last apply condition preserved for _, cdc := range cdcs.reserved { if cds.ByName(cdc.Status.ClusterDeploymentRef.Name) == nil { if err := cdcs.Unassign(c, cdc); err != nil { @@ -924,7 +932,11 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien // CDC exists cdc, ok := cdcs.namespace[cpRef.CustomizationRef.Name] if !ok { - logger.Warningf("CD %s has CDC %s that doesn't exist, this is a bug", cd.Name, cpRef.CustomizationRef.Name) + logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "clusterdeploymentcustomization": cpRef.CustomizationRef.Name, + "namespace": cpRef.Namespace, + }).Warning("CD has reference to a CDC that doesn't exist, this is a bug") continue } @@ -944,6 +956,12 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien } } else { // Fixing reservation should be done by the appropriate cluster pool + logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "parallelclusterdeployment": cdOther.Name, + "clusterdeploymentcustomization": cdc.Name, + "namespace": cdc.Namespace, + }).Warning("Another CD exists and has this CDC reserved") continue } } @@ -960,7 +978,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien } } - // Notice a Broken CD => update the CDC's ApplySucceded condition to BrokenByCloud; + // Notice a Broken CD => update the CDC's ApplySucceeded condition to BrokenByCloud; for _, cd := range cds.Broken() { cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef if cdcRef == nil { @@ -971,7 +989,11 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien return err } } else { - logger.Warningf("CD %s has CDC %s that doesn't exist, this is a bug", cd.Name, cdcRef.Name) + logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "clusterdeploymentcustomization": cdcRef.Name, + "namespace": cd.Spec.ClusterPoolRef.Namespace, + }).Warning("CD has reference to a CDC that doesn't exist, this is a bug") } } @@ -986,10 +1008,17 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien return err } } else { - logger.Warningf("CD %s has CDC %s that doesn't exist, this is a bug", cd.Name, cdcRef.Name) + 
logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "clusterdeploymentcustomization": cdcRef.Name, + "namespace": cd.Spec.ClusterPoolRef.Namespace, + }).Warning("CD has reference to a CDC that doesn't exist, this is a bug") } } + cdcs.Sort() + + // Update ClusterPool InventoryValid condition if err := cdcs.UpdateInventoryValidCondition(c, pool); err != nil { return err } @@ -997,7 +1026,6 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien return nil } -// Update Cluster Pool Inventory Condition func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool *hivev1.ClusterPool) error { message := "" status := corev1.ConditionTrue @@ -1033,6 +1061,7 @@ func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool * var _ json.Marshaler = &cdcCollection{} +// MarshalJSON cdcs implements the InventoryValid condition message func (cdcs *cdcCollection) MarshalJSON() ([]byte, error) { cloud := []string{} for _, cdc := range cdcs.cloud { diff --git a/pkg/controller/hibernation/hibernation_controller_test.go b/pkg/controller/hibernation/hibernation_controller_test.go index a708f6ef763..67c3495891a 100644 --- a/pkg/controller/hibernation/hibernation_controller_test.go +++ b/pkg/controller/hibernation/hibernation_controller_test.go @@ -285,7 +285,6 @@ func TestReconcile(t *testing.T) { assert.Equal(t, hivev1.ReadyReasonStoppingOrHibernating, runCond.Reason) }, }, - { name: "start hibernating, syncsets not applied", cd: cdBuilder.Options(o.shouldHibernate, testcd.InstalledTimestamp(time.Now())).Build(), diff --git a/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go index bfb7069bd54..2f2d31da7eb 100644 --- a/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + yamlpatch "github.com/krishicks/yaml-patch" log "github.com/sirupsen/logrus" admissionv1beta1 "k8s.io/api/admission/v1beta1" @@ -260,7 +261,7 @@ func validateInstallConfigPatches(path *field.Path, patches []hivev1.PatchEntity allErrs := field.ErrorList{} for i, patch := range patches { - if !isValidOP(patch.Op) { + if !isValidOP(yamlpatch.Op(patch.Op)) { allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch op must be a valid json patch operation")) } if len(patch.Path) == 0 || !strings.HasPrefix(patch.Path, "/") { @@ -270,15 +271,15 @@ func validateInstallConfigPatches(path *field.Path, patches []hivev1.PatchEntity return allErrs } -func isValidOP(op string) bool { +func isValidOP(op yamlpatch.Op) bool { switch op { case - "replace", - "add", - "remove", - "test", - "copy", - "move": + yamlpatch.OpAdd, + yamlpatch.OpRemove, + yamlpatch.OpMove, + yamlpatch.OpCopy, + yamlpatch.OpTest, + yamlpatch.OpReplace: return true } return false diff --git a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go index 47d706fee15..1302c8dbad0 100644 --- a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go @@ -179,10 +179,6 @@ func (a *ClusterPoolValidatingAdmissionHook) validateCreate(admissionSpec *admis allErrs = append(allErrs, 
validateClusterPlatform(specPath, newObject.Spec.Platform)...)
 
-	if newObject.Spec.Inventory != nil {
-		allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...)
-	}
-
 	if len(allErrs) > 0 {
 		status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status()
 		return &admissionv1beta1.AdmissionResponse{
@@ -243,10 +239,6 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis
 
 	allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...)
 
-	if newObject.Spec.Inventory != nil {
-		allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...)
-	}
-
 	if len(allErrs) > 0 {
 		contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation")
 		status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status()
@@ -262,11 +254,3 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis
 		Allowed: true,
 	}
 }
-
-func validateInventory(path *field.Path, inventory []hivev1.InventoryEntry) field.ErrorList {
-	allErrs := field.ErrorList{}
-	if len(inventory) == 0 {
-		allErrs = append(allErrs, field.Invalid(path, inventory, "inventory can't be empty"))
-	}
-	return allErrs
-}
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go
index e83976cf855..07eaa045b8f 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go
@@ -52,10 +52,6 @@ const (
 	// FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster
 	// secret before cleaning up the API object.
 	FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster"
-
-	// FinalizerCustomizationRelease is used on ClusterDeployments to ensure we run a successful
-	// release of ClusterDeploymentCustomization
-	FinalizerCustomizationRelease string = "hive.openshift.io/customization"
 )
 
 // ClusterPowerState is used to indicate whether a cluster is running or in a
diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
index 6d280e9a7da..71a6351445c 100644
--- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
+++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -70,6 +70,7 @@ type ClusterDeploymentCustomizationStatus struct {
 	ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"`
 
 	// LastAppliedConfiguration contains the last applied patches to the install-config.
+	// The information is retained for reference in case the customization is updated.
 	// +optional
 	LastAppliedConfiguration string `json:"lastAppliedConfiguration,omitempty"`

From 033d97830185342a40b59b5c1640acd96d58c2af Mon Sep 17 00:00:00 2001
From: Alexander Braverman Masis
Date: Sun, 29 May 2022 12:01:41 +0300
Subject: [PATCH 10/27] More fixes and e2e test

---
 .../clusterdeploymentcustomization_types.go |   4 +-
 hack/e2e-pool-test.sh                       |  50 ++++++++
 .../clusterdeployment_controller_test.go    |  22 +++-
 .../clusterpool/clusterpool_controller.go   |  30 ++---
 .../clusterpool_controller_test.go          | 109 ++++++++----------
 pkg/controller/clusterpool/collections.go   |  20 +++-
 pkg/test/assert/assertions.go               |  27 +++++
 7 files changed, 176 insertions(+), 86 deletions(-)

diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go
index 71a6351445c..8917e756c51 100644
--- a/apis/hive/v1/clusterdeploymentcustomization_types.go
+++ b/apis/hive/v1/clusterdeploymentcustomization_types.go
@@ -12,10 +12,10 @@ const (
 	CustomizationApplyReasonSucceeded = "Succeeded"
 	// CustomizationApplyReasonBrokenSyntax indicates that Hive failed to apply
 	// customization patches on install-config. More details would be found in
-	// Valid condition message.
+	// ApplySucceeded condition message.
 	CustomizationApplyReasonBrokenSyntax = "BrokenBySyntax"
 	// CustomizationApplyReasonBrokenCloud indicates that cluster deployment provision has failed
-	// when using this customization. More details would be found in the Valid condition message.
+	// when using this customization. More details would be found in the ApplySucceeded condition message.
 	CustomizationApplyReasonBrokenCloud = "BrokenByCloud"
 	// CustomizationApplyReasonInstallationPending indicates that the customization patches have
 	// been successfully applied but provisioning is not completed yet.
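These reasons describe one lifecycle: a customization's install-config patches are applied (InstallationPending) and the outcome is later recorded as Succeeded, BrokenBySyntax, or BrokenByCloud on the ApplySucceeded condition. A minimal sketch of a customization mid-provision (names and values illustrative, not taken from the patches; the condition layout assumes the conditionsv1 schema used elsewhere in this series):

    apiVersion: hive.openshift.io/v1
    kind: ClusterDeploymentCustomization
    metadata:
      name: team-a-cdc
      namespace: pool-namespace
    spec:
      installConfigPatches:
      - op: replace
        path: /metadata/name
        value: team-a-cluster
    status:
      clusterDeploymentRef:
        name: pool-cluster-x7k2p
      conditions:
      - type: ApplySucceeded
        status: "False"
        reason: InstallationPending
        message: Patches applied; cluster is installing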
diff --git a/hack/e2e-pool-test.sh b/hack/e2e-pool-test.sh
index d1cdadc4481..a50bfc4471c 100755
--- a/hack/e2e-pool-test.sh
+++ b/hack/e2e-pool-test.sh
@@ -23,6 +23,24 @@ spec:
 EOF
 }
 
+function create_customization() {
+  local is_name=$1
+  local ns=$2
+  echo "Creating ClusterDeploymentCustomization $is_name"
+  oc apply -f -<<EOF
+apiVersion: hive.openshift.io/v1
+kind: ClusterDeploymentCustomization
+metadata:
+  name: $is_name
+  name: $ns
+spec:
+  installConfigPatches:
+  - op: replace
+    path: /metadata/name
+    value: cdc-test
+EOF
+}
+
[...the remaining e2e-pool-test.sh hunks and the clusterdeployment_controller_test.go changes from this patch were lost in extraction; only the fragments below survive. The heredoc above is reconstructed from the context lines of PATCH 14, which later corrects its duplicate "name:" key...]

diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go
 	case drift < 0:
 		toAdd := minIntVarible(-drift, availableCapacity, availableCurrent)
+		if clp.Spec.Inventory != nil {
+			toAdd = minIntVarible(toAdd, len(cdcs.Unassigned()))
+		}
 		if err := r.addClusters(clp, poolVersion, cds, toAdd, cdcs, logger); err != nil {
 			log.WithError(err).Error("error adding clusters")
 			return reconcile.Result{}, err
 		}
@@ -710,12 +720,6 @@ func (r *ReconcileClusterPool) createCluster(
 	cdcs *cdcCollection,
 	logger log.FieldLogger,
 ) (*hivev1.ClusterDeployment, error) {
-	if clp.Spec.Inventory != nil {
-		if len(cdcs.unassigned) == 0 {
-			return nil, errors.New("no customization available")
-		}
-	}
-
 	var err error
 	ns, err := r.createRandomNamespace(clp)
diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go
index 43d0dcb2641..24cc10a8b13 100644
--- a/pkg/controller/clusterpool/clusterpool_controller_test.go
+++ b/pkg/controller/clusterpool/clusterpool_controller_test.go
@@ -3,6 +3,7 @@
 import (
 	"context"
 	"encoding/json"
+	"regexp"
 	"sort"
 	"testing"
 	"time"
@@ -121,13 +122,14 @@ func TestReconcileClusterPool(t *testing.T) {
 		expectedMissingDependenciesStatus  corev1.ConditionStatus
 		expectedCapacityStatus             corev1.ConditionStatus
 		expectedCDCurrentStatus            corev1.ConditionStatus
-		expectedInventoryVaildStatus       corev1.ConditionStatus
+		expectedInventoryValidStatus       corev1.ConditionStatus
 		expectedInventoryMessage           map[string][]string
+		expectedCDCReason                  map[string]string
 		expectedMissingDependenciesMessage string
 		expectedAssignedClaims             int
 		expectedUnassignedClaims           int
 		expectedAssignedCDs                int
-		expectedAssignedCDCs               int
+		expectedAssignedCDCs               map[string]string
 		expectedRunning                    int
 		expectedLabels                     map[string]string // Tested on all clusters, so will not work if your test has pre-existing cds in the pool.
 		// Map, keyed by claim name, of expected Status.Conditions['Pending'].Reason.
@@ -136,7 +138,6 @@ func TestReconcileClusterPool(t *testing.T) { expectedClaimPendingReasons map[string]string expectedInventoryAssignmentOrder []string expectPoolVersionChanged bool - expectedPoolVersion string }{ { name: "initialize conditions", @@ -146,7 +147,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedMissingDependenciesStatus: corev1.ConditionUnknown, expectedCapacityStatus: corev1.ConditionUnknown, expectedCDCurrentStatus: corev1.ConditionUnknown, - expectedInventoryVaildStatus: corev1.ConditionUnknown, + expectedInventoryValidStatus: corev1.ConditionUnknown, }, { name: "copyover fields", @@ -192,29 +193,6 @@ func TestReconcileClusterPool(t *testing.T) { }, expectPoolVersionChanged: true, }, - { - name: "poolVersion changes with new Inventory", - existing: []runtime.Object{ - initializedPoolBuilder.Build(testcp.WithInventory([]string{"test-cdc-1"})), - }, - expectPoolVersionChanged: true, - }, - { - name: "poolVersion doens't change with existing inventory when entry added", - existing: []runtime.Object{ - inventoryPoolBuilder().Build(testcp.WithInventory([]string{"test-cdc-1", "test-cdc-2"})), - }, - expectPoolVersionChanged: false, - expectInventory: true, - }, - { - name: "poolVersion changes when inventory removed", - existing: []runtime.Object{ - inventoryPoolBuilder().Build(testcp.WithInventory([]string{})), - }, - expectPoolVersionChanged: true, - expectInventory: true, - }, { name: "cp with inventory and cdc exists is valid", existing: []runtime.Object{ @@ -224,9 +202,10 @@ func TestReconcileClusterPool(t *testing.T) { expectedTotalClusters: 1, expectedObservedSize: 0, expectedObservedReady: 0, - expectedInventoryVaildStatus: corev1.ConditionTrue, + expectedInventoryValidStatus: corev1.ConditionTrue, expectInventory: true, - expectedAssignedCDCs: 1, + expectedAssignedCDCs: map[string]string{"test-cdc-1": testLeasePoolName}, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonInstallationPending}, }, { name: "cp with inventory and available cdc deleted without hold", @@ -265,7 +244,7 @@ func TestReconcileClusterPool(t *testing.T) { name: "cp with inventory and reserved cdc deletion on hold", existing: []runtime.Object{ inventoryPoolBuilder().Build(testcp.WithSize(1)), - testcd.FullBuilder("c1", "c1", scheme).Build( + testcd.FullBuilder(testNamespace, "c1", scheme).Build( testcd.WithPoolVersion(inventoryPoolVersion), testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "claim"), testcd.WithCustomization("test-cdc-1"), @@ -277,10 +256,12 @@ func TestReconcileClusterPool(t *testing.T) { testgeneric.Deleted(), testgeneric.WithFinalizer(finalizer), ).Build( + testcdc.WithPool(testLeasePoolName), + testcdc.WithCD("c1"), testcdc.Reserved(), ), }, - expectedTotalClusters: 2, + expectedTotalClusters: 1, expectedRunning: 1, expectedObservedSize: 0, expectedObservedReady: 0, @@ -289,7 +270,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedCDCurrentStatus: corev1.ConditionTrue, expectedAssignedCDs: 1, expectedUnassignedClaims: 0, - expectedAssignedCDCs: 1, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, }, { name: "cp with inventory and cdc doesn't exist is not valid - missing", @@ -299,11 +280,11 @@ func TestReconcileClusterPool(t *testing.T) { expectedTotalClusters: 0, expectedObservedSize: 0, expectedObservedReady: 0, - expectedInventoryVaildStatus: corev1.ConditionFalse, + expectedInventoryValidStatus: corev1.ConditionFalse, expectedInventoryMessage: map[string][]string{"Missing": {"test-cdc-1"}}, - 
expectedCDCurrentStatus: corev1.ConditionUnknown, + expectedCDCurrentStatus: corev1.ConditionTrue, // huh? expectInventory: true, - expectError: true, + expectError: false, }, { name: "cp with inventory and cdc patch broken is not valid - BrokenBySyntax", @@ -316,8 +297,9 @@ func TestReconcileClusterPool(t *testing.T) { expectedTotalClusters: 0, expectedObservedSize: 0, expectedObservedReady: 0, - expectedInventoryVaildStatus: corev1.ConditionFalse, + expectedInventoryValidStatus: corev1.ConditionFalse, expectedInventoryMessage: map[string][]string{"BrokenBySyntax": {"test-cdc-1"}}, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonBrokenSyntax}, expectInventory: true, expectedCDCurrentStatus: corev1.ConditionUnknown, expectError: true, @@ -337,10 +319,11 @@ func TestReconcileClusterPool(t *testing.T) { expectedTotalClusters: 0, expectedObservedSize: 1, expectedObservedReady: 0, - expectedInventoryVaildStatus: corev1.ConditionFalse, + expectedInventoryValidStatus: corev1.ConditionFalse, expectedInventoryMessage: map[string][]string{"BrokenByCloud": {"test-cdc-1"}}, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonBrokenCloud}, expectInventory: true, - expectedAssignedCDCs: 1, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, }, { name: "cp with inventory and good cdc is valid, cd created", @@ -355,9 +338,10 @@ func TestReconcileClusterPool(t *testing.T) { expectedTotalClusters: 0, expectedObservedSize: 1, expectedObservedReady: 1, - expectedInventoryVaildStatus: corev1.ConditionTrue, + expectedInventoryValidStatus: corev1.ConditionTrue, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonSucceeded}, expectInventory: true, - expectedAssignedCDCs: 1, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, }, { name: "cp with inventory - correct prioritization - same status", @@ -372,10 +356,10 @@ func TestReconcileClusterPool(t *testing.T) { testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build(), }, expectedTotalClusters: 1, - expectedInventoryVaildStatus: corev1.ConditionTrue, + expectedInventoryValidStatus: corev1.ConditionTrue, expectInventory: true, expectedInventoryAssignmentOrder: []string{"test-cdc-successful-old"}, - expectedAssignedCDCs: 1, + expectedAssignedCDCs: map[string]string{"test-cdc-successful-old": ""}, }, { name: "cp with inventory - correct prioritization - successful vs broken", @@ -392,10 +376,10 @@ func TestReconcileClusterPool(t *testing.T) { ), }, expectedTotalClusters: 1, - expectedInventoryVaildStatus: corev1.ConditionFalse, + expectedInventoryValidStatus: corev1.ConditionFalse, expectInventory: true, expectedInventoryAssignmentOrder: []string{"test-cdc-successful-new"}, - expectedAssignedCDCs: 1, + expectedAssignedCDCs: map[string]string{"test-cdc-successful-new": ""}, }, { name: "cp with inventory - release cdc when cd is missing", @@ -413,7 +397,7 @@ func TestReconcileClusterPool(t *testing.T) { }, expectedTotalClusters: 1, expectInventory: true, - expectedAssignedCDCs: 1, // The CDC will be assigned to a new cluster + expectedAssignedCDCs: map[string]string{"test-cdc-1": ""}, }, { name: "cp with inventory - fix cdc when cd reference exists", @@ -433,7 +417,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedTotalClusters: 1, expectedObservedSize: 1, expectInventory: true, - expectedAssignedCDCs: 1, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, expectedCDCurrentStatus: corev1.ConditionUnknown, }, { 
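The expectedInventoryMessage maps in the cases above are decoded from the InventoryValid condition's message, which the collection's JSON marshaller renders as buckets of broken customization names. Assuming the bucket keys the tests assert on, a pool whose only customization failed during provisioning would carry a condition roughly like:

    status:
      conditions:
      - type: InventoryValid
        status: "False"
        message: '{"BrokenByCloud":["test-cdc-1"],"BrokenBySyntax":[],"Missing":[]}'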
@@ -1687,13 +1671,11 @@ func TestReconcileClusterPool(t *testing.T) { Build(testsecret.WithDataKeyValue("dummykey", []byte("dummyval"))), ) } - if test.expectedPoolVersion == "" { - if test.expectInventory { - test.expectedPoolVersion = inventoryPoolVersion - } else { - test.expectedPoolVersion = initialPoolVersion - } + expectedPoolVersion := initialPoolVersion + if test.expectInventory { + expectedPoolVersion = inventoryPoolVersion } + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(test.existing...).Build() logger := log.New() logger.SetLevel(log.DebugLevel) @@ -1728,7 +1710,6 @@ func TestReconcileClusterPool(t *testing.T) { assert.Equal(t, test.expectedObservedSize, pool.Status.Size, "unexpected observed size") assert.Equal(t, test.expectedObservedReady, pool.Status.Ready, "unexpected observed ready count") currentPoolVersion := calculatePoolVersion(pool) - expectedPoolVersion := test.expectedPoolVersion assert.Equal( t, test.expectPoolVersionChanged, currentPoolVersion != expectedPoolVersion, "expectPoolVersionChanged is %t\ninitial %q\nfinal %q", @@ -1763,10 +1744,10 @@ func TestReconcileClusterPool(t *testing.T) { } } - if test.expectedInventoryVaildStatus != "" { + if test.expectedInventoryValidStatus != "" { inventoryValidCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") { - assert.Equal(t, test.expectedInventoryVaildStatus, inventoryValidCondition.Status, + assert.Equal(t, test.expectedInventoryValidStatus, inventoryValidCondition.Status, "unexpcted InventoryValid condition status %s", inventoryValidCondition.Message) } } @@ -1862,7 +1843,6 @@ func TestReconcileClusterPool(t *testing.T) { assert.Equal(t, test.expectedAssignedClaims, actualAssignedClaims, "unexpected number of assigned claims") assert.Equal(t, test.expectedUnassignedClaims, actualUnassignedClaims, "unexpected number of unassigned claims") - actualAssignedCDCs := 0 cdcs := &hivev1.ClusterDeploymentCustomizationList{} err = fakeClient.List(context.Background(), cdcs) require.NoError(t, err) @@ -1870,9 +1850,18 @@ func TestReconcileClusterPool(t *testing.T) { for _, cdc := range cdcs.Items { cdcMap[cdc.Name] = cdc - condition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) - if condition != nil && condition.Status == corev1.ConditionFalse { - actualAssignedCDCs++ + condition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) + if test.expectedCDCReason != nil { + if reason, ok := test.expectedCDCReason[cdc.Name]; ok { + assert.NotNil(t, condition) + assert.Equal(t, reason, condition.Reason, "expected CDC status to match") + } + } + + if test.expectedAssignedCDCs != nil { + if cdName, ok := test.expectedAssignedCDCs[cdc.Name]; ok { + assert.Regexp(t, regexp.MustCompile(cdName), cdc.Status.ClusterDeploymentRef.Name, "expected CDC assignment to CD match") + } } } @@ -1888,8 +1877,6 @@ func TestReconcileClusterPool(t *testing.T) { lastTime = condition.LastTransitionTime } } - - assert.Equal(t, test.expectedAssignedCDCs, actualAssignedCDCs, "unexpected number of assigned CDCs") }) } } diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 45e139dde0e..90d5496f165 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -232,7 +232,7 @@ func isBroken(cd 
*hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log
 		return true
 	}
 
-	// Check if CD's customization exists in CP invnetory
+	// Check if CD's customization exists in CP inventory
 	if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil {
 		customizationExists := false
 		cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name
@@ -659,8 +659,7 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg
 	}
 
 	for i, cdc := range cdcList.Items {
-		ref := &cdcList.Items[i]
-		cdcCol.namespace[cdc.Name] = ref
+		cdcCol.namespace[cdc.Name] = &cdcList.Items[i]
 	}
 
 	for _, item := range pool.Spec.Inventory {
@@ -740,6 +739,10 @@ func (cdcs *cdcCollection) Sort() {
 }
 
 func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cdName, poolName string) error {
+	if cdc.Status.ClusterDeploymentRef != nil || cdc.Status.ClusterPoolRef != nil {
+		return errors.New("ClusterDeploymentCustomization already reserved")
+	}
+
 	cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cdName}
 	cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: poolName}
 
@@ -791,9 +794,7 @@ func (cdcs *cdcCollection) Unassign(c client.Client, cdc *hivev1.ClusterDeployme
 		}
 	}
 
-	if _, ok := cdcs.reserved[cdc.Name]; ok {
-		delete(cdcs.reserved, cdc.Name)
-	}
+	delete(cdcs.reserved, cdc.Name)
 
 	cdcs.unassigned = append(cdcs.unassigned, cdc)
 	cdcs.Sort()
@@ -876,6 +877,10 @@ func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.Clus
 	return nil
 }
 
+func (c *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization {
+	return c.unassigned
+}
+
 // SyncClusterDeploymentCustomizations updates CDCs and related CR status:
 // - Handle deletion of CDC in the namespace
 // - If there is no CD, but CDC is reserved, then we release the CDC
@@ -966,6 +971,9 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien
 		}
 	}
 	// Fix CDC availability
+	if err := cdcs.Unassign(c, cdc); err != nil {
+		return err
+	}
 	if err := cdcs.Reserve(c, cdc, cd.Name, pool.Name); err != nil {
 		return err
 	}
diff --git a/pkg/test/assert/assertions.go b/pkg/test/assert/assertions.go
index 3c2432fac3a..c6391e4e8ca 100644
--- a/pkg/test/assert/assertions.go
+++ b/pkg/test/assert/assertions.go
@@ -13,6 +13,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
 	hivev1 "github.com/openshift/hive/apis/hive/v1"
 )
 
@@ -55,6 +56,15 @@ func findClusterDeploymentCondition(conditions []hivev1.ClusterDeploymentConditi
 	return nil
 }
 
+func findCDCCondition(conditions []conditionsv1.Condition, conditionType conditionsv1.ConditionType) *conditionsv1.Condition {
+	for i, condition := range conditions {
+		if condition.Type == conditionType {
+			return &conditions[i]
+		}
+	}
+	return nil
+}
+
 // AssertConditionStatus asserts if a condition is present on the cluster deployment and has the expected status
 func AssertConditionStatus(t *testing.T, cd *hivev1.ClusterDeployment, condType hivev1.ClusterDeploymentConditionType, status corev1.ConditionStatus) {
 	condition := findClusterDeploymentCondition(cd.Status.Conditions, condType)
@@ -80,6 +90,23 @@ func AssertConditions(t *testing.T, cd *hivev1.ClusterDeployment, expectedCondit
 	}
 }
 
+// AssertCDCConditions asserts if the expected conditions are present on the cluster deployment customization.
+// It also asserts if those conditions have the expected status, reason, and (optionally) message.
+func AssertCDCConditions(t *testing.T, cdc *hivev1.ClusterDeploymentCustomization, expectedConditions []conditionsv1.Condition) { + testifyassert.LessOrEqual(t, len(expectedConditions), len(cdc.Status.Conditions), "some conditions are not present") + for _, expectedCond := range expectedConditions { + condition := findCDCCondition(cdc.Status.Conditions, expectedCond.Type) + if testifyassert.NotNilf(t, condition, "did not find expected condition type: %v", expectedCond.Type) { + testifyassert.Equal(t, expectedCond.Status, condition.Status, "condition found with unexpected status") + testifyassert.Equal(t, expectedCond.Reason, condition.Reason, "condition found with unexpected reason") + // Optionally validate the message + if expectedCond.Message != "" { + testifyassert.Equal(t, expectedCond.Message, condition.Message, "condition found with unexpected message") + } + } + } +} + // AssertEqualWhereItCounts compares two runtime.Objects, ignoring their ResourceVersion and TypeMeta, asserting that they // are otherwise equal. // This and cleanRVAndTypeMeta were borrowed/adapted from: From b86b1f294a8a58d41f33fb4468b82c0321766ba1 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Mon, 30 May 2022 14:43:43 +0300 Subject: [PATCH 11/27] rebase --- hack/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/requirements.txt b/hack/requirements.txt index 38dbcfd7bda..0ac282c0723 100644 --- a/hack/requirements.txt +++ b/hack/requirements.txt @@ -1,2 +1,3 @@ GitPython PyYAML>=6.0 +yq \ No newline at end of file From 8c79fedae7011e434e8a7ff0fef40994ef92deb2 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Wed, 8 Jun 2022 11:19:55 +0300 Subject: [PATCH 12/27] Another fix --- hack/e2e-pool-test.sh | 2 +- .../clusterpool/clusterpool_controller.go | 2 +- .../clusterpool_controller_test.go | 43 ++++--- pkg/controller/clusterpool/collections.go | 114 +++++++++--------- .../clusterdeploymentcustomization_types.go | 4 +- 5 files changed, 90 insertions(+), 75 deletions(-) diff --git a/hack/e2e-pool-test.sh b/hack/e2e-pool-test.sh index a50bfc4471c..027a55a438f 100755 --- a/hack/e2e-pool-test.sh +++ b/hack/e2e-pool-test.sh @@ -313,7 +313,7 @@ wait_for_hibernation_state $CLUSTER_NAME Running # Test customization create_customization "cdc-test" "${CLUSTER_NAMESPACE}" -oc patch cp -n $CLUSTER_NAMESPACE $REAL_POOL_NAME --type=merge -p '{"spec": {"inventory": "[{"name": "cdc-test"}]"}}' +oc patch cp -n $CLUSTER_NAMESPACE $REAL_POOL_NAME --type=merge -p '{"spec": {"inventory": [{"name": "cdc-test"}]}}' oc delete clusterclaim --all wait_for_pool_to_be_ready $REAL_POOL_NAME verify_cluster_name $REAL_POOL_NAME "cdc-test" "cdc-test" diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index d1ca8605d1c..cbe8df00fd3 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -775,7 +775,7 @@ func (r *ReconcileClusterPool) createCluster( poolRef := poolReference(clp) cd.Spec.ClusterPoolRef = &poolRef if clp.Spec.Inventory != nil { - cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.unassigned[0].Name} + cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.next} } } else if secretTmp := isInstallConfigSecret(obj); secretTmp != nil { secret = secretTmp diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index 
24cc10a8b13..94818479187 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -361,6 +361,31 @@ func TestReconcileClusterPool(t *testing.T) { expectedInventoryAssignmentOrder: []string{"test-cdc-successful-old"}, expectedAssignedCDCs: map[string]string{"test-cdc-successful-old": ""}, }, + { + name: "cp with inventory - correct prioritization - mix and multiple deployments", + existing: []runtime.Object{ + initializedPoolBuilder.Build( + testcp.WithSize(2), + testcp.WithInventory([]string{"test-cdc-successful-old", "test-cdc-unused-new", "test-cdc-broken-old"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-successful-old", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonBrokenCloud, nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build(), + }, + expectedTotalClusters: 2, + expectedInventoryValidStatus: corev1.ConditionFalse, + expectInventory: true, + expectedInventoryAssignmentOrder: []string{"test-cdc-successful-old", "test-cdc-unused-new"}, + expectedAssignedCDCs: map[string]string{ + "test-cdc-successful-old": "", + "test-cdc-unused-new": "", + }, + }, + { name: "cp with inventory - correct prioritization - successful vs broken", existing: []runtime.Object{ @@ -1873,7 +1898,7 @@ func TestReconcileClusterPool(t *testing.T) { if condition == nil || condition.Status == corev1.ConditionUnknown || condition.Status == corev1.ConditionTrue { assert.Failf(t, "expected CDC %s to be assigned", cdcName) } - assert.True(t, lastTime.Before(&condition.LastTransitionTime), "expected %s to be before %s", lastTime, condition.LastTransitionTime) + assert.True(t, lastTime.Before(&condition.LastTransitionTime) || lastTime.Equal(&condition.LastTransitionTime), "expected %s to be before %s", lastTime, condition.LastTransitionTime) lastTime = condition.LastTransitionTime } } @@ -2636,8 +2661,6 @@ func TestReconcileRBAC(t *testing.T) { func Test_isBroken(t *testing.T) { logger := log.New() - basicPool := hivev1.ClusterPool{} - poolNoHibernationConfig := hivev1.ClusterPool{ Spec: hivev1.ClusterPoolSpec{}, } @@ -2831,20 +2854,6 @@ func Test_isBroken(t *testing.T) { pool: &poolWithTimeout, want: true, }, - { - name: "ClusterDeploymentCustomization was removed", - cd: testcd.BasicBuilder().Options( - testcd.WithCondition( - hivev1.ClusterDeploymentCondition{ - Type: hivev1.ProvisionStoppedCondition, - Status: corev1.ConditionFalse, - }), - testcd.WithClusterPoolReference(testNamespace, "cp", ""), - testcd.WithCustomization("test-cdc-1"), - ).Build(), - pool: &basicPool, - want: true, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 90d5496f165..a9dcbfc1143 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -210,6 +210,8 @@ type cdCollection struct { unknownPoolVersion []*hivev1.ClusterDeployment // Clusters whose pool version annotation doesn't match the pool's mismatchedPoolVersion []*hivev1.ClusterDeployment + // Cluster whose customization reference was removed from pool's inventory + customizationMissing []*hivev1.ClusterDeployment // All CDs in this pool byCDName 
map[string]*hivev1.ClusterDeployment
 	// This contains only claimed CDs
@@ -232,25 +234,6 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log
 		return true
 	}
 
-	// Check if CD's customization exists in CP inventory
-	if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.CustomizationRef != nil {
-		customizationExists := false
-		cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name
-		for _, entry := range pool.Spec.Inventory {
-			if cdcName == entry.Name {
-				customizationExists = true
-			}
-		}
-		if !customizationExists {
-			logger.WithFields(log.Fields{
-				"ClusterDeployment":              cd.Name,
-				"ClusterDeploymentCustomization": cdcName,
-				"ClusterPool":                    pool.Name,
-			}).Info("Cluster is broken due to a removed customization from pool's inventory")
-			return true
-		}
-	}
-
 	////
 	// Check for resume timeout
 	////
@@ -331,6 +314,16 @@ func getAllClusterDeploymentsForPool(c client.Client, pool *hivev1.ClusterPool,
 			}).Error("unepectedly got a ClusterDeployment not belonging to this pool")
 			continue
 		}
+		customizationExists := true
+		if cd.Spec.ClusterPoolRef.CustomizationRef != nil {
+			customizationExists = false
+			cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name
+			for _, entry := range pool.Spec.Inventory {
+				if cdcName == entry.Name {
+					customizationExists = true
+				}
+			}
+		}
 		ref := &cdList.Items[i]
 		cdCol.byCDName[cd.Name] = ref
 		claimName := poolRef.ClaimName
@@ -351,7 +344,7 @@ func getAllClusterDeploymentsForPool(c client.Client, pool *hivev1.ClusterPool,
 		} else {
 			cdCol.installing = append(cdCol.installing, ref)
 		}
-		// Count stale CDs (poolVersion either unknown or mismatched)
+		// Count stale CDs (poolVersion either unknown or mismatched, or customization was removed)
 		if cdPoolVersion, ok := cd.Annotations[constants.ClusterDeploymentPoolSpecHashAnnotation]; !ok || cdPoolVersion == "" {
 			// Annotation is either missing or empty. This could be due to upgrade (this CD was
 			// created before this code was installed) or manual intervention (outside agent mucked
 			cdCol.unknownPoolVersion = append(cdCol.unknownPoolVersion, ref)
 		} else if cdPoolVersion != poolVersion {
 			cdCol.mismatchedPoolVersion = append(cdCol.mismatchedPoolVersion, ref)
+		} else if cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef; cdcRef != nil && !customizationExists {
+			cdCol.customizationMissing = append(cdCol.customizationMissing, ref)
 		}
-
 	}
 	// Register all claimed CDs, even if they're deleting/marked
 	if claimName != "" {
@@ -511,7 +505,9 @@ func (cds *cdCollection) MismatchedPoolVersion() []*hivev1.ClusterDeployment {
 // Stale returns the list of ClusterDeployments whose pool version annotation doesn't match the
 // version of the pool. Put "unknown" first becuase they're annoying.
 func (cds *cdCollection) Stale() []*hivev1.ClusterDeployment {
-	return append(cds.unknownPoolVersion, cds.mismatchedPoolVersion...)
+	stale := append(cds.unknownPoolVersion, cds.mismatchedPoolVersion...)
+	stale = append(stale, cds.customizationMissing...)
+	return stale
 }
 
 // RegisterNewCluster adds a freshly-created cluster to the cdCollection, assuming it is installing.
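In other words, PATCH 12 stops treating a removed inventory entry as a broken cluster and instead counts the CD as stale, so it is replaced through the normal stale-cluster path. A condensed sketch of the resulting three-way bucketing (the helper is hypothetical; the real logic is inline in getAllClusterDeploymentsForPool above, and ClusterPoolRef is assumed non-nil, as that code has already verified pool membership):

    // classify mirrors the inline bucketing: a missing or empty pool-version
    // annotation is "unknown", a different value is "mismatched", and a
    // CustomizationRef that is no longer in the pool's inventory marks the CD
    // stale because its customization was removed.
    func classify(cd *hivev1.ClusterDeployment, poolVersion string, inventory map[string]bool) string {
    	v, ok := cd.Annotations[constants.ClusterDeploymentPoolSpecHashAnnotation]
    	switch {
    	case !ok || v == "":
    		return "unknownPoolVersion"
    	case v != poolVersion:
    		return "mismatchedPoolVersion"
    	}
    	if ref := cd.Spec.ClusterPoolRef.CustomizationRef; ref != nil && !inventory[ref.Name] {
    		return "customizationMissing"
    	}
    	return "current"
    }

Stale() then returns all three buckets, unknown first, and the pool controller replaces those clusters as capacity allows.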
@@ -619,7 +615,7 @@ func (cds *cdCollection) Delete(c client.Client, cdName string) error { type cdcCollection struct { // Unclaimed by any cluster pool CD and are not broken - unassigned []*hivev1.ClusterDeploymentCustomization + unassigned map[string]*hivev1.ClusterDeploymentCustomization // Missing CDC means listed in pool inventory but the custom resource doesn't exist in the pool namespace missing []string // Used by some cluster deployment @@ -632,6 +628,8 @@ type cdcCollection struct { byCDCName map[string]*hivev1.ClusterDeploymentCustomization // Namespace are all the CDC in the namespace mapped by name namespace map[string]*hivev1.ClusterDeploymentCustomization + // Next CDC to assign + next string } // getAllCustomizationsForPool is the constructor for a cdcCollection for all of the @@ -649,7 +647,7 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg } cdcCol := cdcCollection{ - unassigned: make([]*hivev1.ClusterDeploymentCustomization, 0), + unassigned: make(map[string]*hivev1.ClusterDeploymentCustomization), missing: make([]string, 0), reserved: make(map[string]*hivev1.ClusterDeploymentCustomization), cloud: make(map[string]*hivev1.ClusterDeploymentCustomization), @@ -666,7 +664,7 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg if cdc, ok := cdcCol.namespace[item.Name]; ok { cdcCol.byCDCName[item.Name] = cdc if cdRef := cdc.Status.ClusterDeploymentRef; cdRef == nil { - cdcCol.unassigned = append(cdcCol.unassigned, cdc) + cdcCol.unassigned[item.Name] = cdc } else { cdcCol.reserved[item.Name] = cdc } @@ -702,14 +700,19 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg // customization. When customizations have the same last apply status, the // oldest used customization will be prioritized. 
func (cdcs *cdcCollection) Sort() { + var unassigned []*hivev1.ClusterDeploymentCustomization + for _, cdc := range cdcs.unassigned { + unassigned = append(unassigned, cdc) + } + sort.Slice( - cdcs.unassigned, + unassigned, func(i, j int) bool { now := metav1.NewTime(time.Now()) - iStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[i].Status.Conditions, hivev1.ApplySucceededCondition) - jStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[j].Status.Conditions, hivev1.ApplySucceededCondition) - iName := cdcs.unassigned[i].Name - jName := cdcs.unassigned[j].Name + iStatus := conditionsv1.FindStatusCondition(unassigned[i].Status.Conditions, hivev1.ApplySucceededCondition) + jStatus := conditionsv1.FindStatusCondition(unassigned[j].Status.Conditions, hivev1.ApplySucceededCondition) + iName := unassigned[i].Name + jName := unassigned[j].Name if iStatus == nil || iStatus.Status == corev1.ConditionUnknown { iStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} iStatus.LastTransitionTime = now @@ -736,6 +739,9 @@ func (cdcs *cdcCollection) Sort() { return iName < jName }, ) + if len(unassigned) > 0 { + cdcs.next = unassigned[0].Name + } } func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cdName, poolName string) error { @@ -762,13 +768,7 @@ func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymen cdcs.reserved[cdc.Name] = cdc cdcs.byCDCName[cdc.Name] = cdc - for i, cdci := range cdcs.unassigned { - if cdci.Name == cdc.Name { - copy(cdcs.unassigned[i:], cdcs.unassigned[i+1:]) - cdcs.unassigned = cdcs.unassigned[:len(cdcs.unassigned)-1] - break - } - } + delete(cdcs.unassigned, cdc.Name) cdcs.Sort() return nil @@ -796,7 +796,7 @@ func (cdcs *cdcCollection) Unassign(c client.Client, cdc *hivev1.ClusterDeployme delete(cdcs.reserved, cdc.Name) - cdcs.unassigned = append(cdcs.unassigned, cdc) + cdcs.unassigned[cdc.Name] = cdc cdcs.Sort() return nil } @@ -863,7 +863,7 @@ func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.Clus Type: hivev1.ApplySucceededCondition, Status: corev1.ConditionFalse, Reason: hivev1.CustomizationApplyReasonInstallationPending, - Message: "Patches applied and cluster installed successfully", + Message: "Patches applied; cluster is installing", }) if changed { @@ -877,7 +877,7 @@ func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.Clus return nil } -func (c *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { +func (c *cdcCollection) Unassigned() map[string]*hivev1.ClusterDeploymentCustomization { return c.unassigned } @@ -934,14 +934,16 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien continue } + logger = logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "clusterdeploymentcustomization": cpRef.CustomizationRef.Name, + "namespace": cpRef.Namespace, + }) + // CDC exists cdc, ok := cdcs.namespace[cpRef.CustomizationRef.Name] if !ok { - logger.WithFields(log.Fields{ - "clusterdeployment": cd.Name, - "clusterdeploymentcustomization": cpRef.CustomizationRef.Name, - "namespace": cpRef.Namespace, - }).Warning("CD has reference to a CDC that doesn't exist, this is a bug") + logger.Warning("CD has reference to a CDC that doesn't exist, it was forceully removed or this is a bug") continue } @@ -962,10 +964,8 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien } else { // Fixing reservation should be done by the appropriate cluster 
pool logger.WithFields(log.Fields{ - "clusterdeployment": cd.Name, - "parallelclusterdeployment": cdOther.Name, - "clusterdeploymentcustomization": cdc.Name, - "namespace": cdc.Namespace, + "parallelclusterdeployment": cdOther.Name, + "namespace": cdc.Namespace, }).Warning("Another CD exists and has this CDC reserved") continue } @@ -980,6 +980,8 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien if cd.Spec.Installed { cdcs.Succeeded(c, cdc) + } else if isBroken(cd, pool, logger) { + cdcs.BrokenByCloud(c, cdc) } else { cdcs.InstallationPending(c, cdc) } @@ -1001,7 +1003,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien "clusterdeployment": cd.Name, "clusterdeploymentcustomization": cdcRef.Name, "namespace": cd.Spec.ClusterPoolRef.Namespace, - }).Warning("CD has reference to a CDC that doesn't exist, this is a bug") + }).Warning("CD has reference to a CDC that doesn't exist, it was forceully removed or this is a bug") } } @@ -1020,7 +1022,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien "clusterdeployment": cd.Name, "clusterdeploymentcustomization": cdcRef.Name, "namespace": cd.Spec.ClusterPoolRef.Namespace, - }).Warning("CD has reference to a CDC that doesn't exist, this is a bug") + }).Warning("CD has reference to a CDC that doesn't exist, it was forceully removed or this is a bug") } } @@ -1039,7 +1041,9 @@ func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool * status := corev1.ConditionTrue reason := hivev1.InventoryReasonValid if (len(cdcs.syntax) + len(cdcs.cloud) + len(cdcs.missing)) > 0 { - messageByte, err := json.Marshal(cdcs) + // Send the cdcCollection to our custom marshaller that extracts and marshals just the invalid CDCs. + var b invalidCDCCollection = invalidCDCCollection(*cdcs) + messageByte, err := json.Marshal(&b) if err != nil { return err } @@ -1067,10 +1071,12 @@ func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool * return nil } -var _ json.Marshaler = &cdcCollection{} +type invalidCDCCollection cdcCollection + +var _ json.Marshaler = &invalidCDCCollection{} // MarshalJSON cdcs implements the InventoryValid condition message -func (cdcs *cdcCollection) MarshalJSON() ([]byte, error) { +func (cdcs *invalidCDCCollection) MarshalJSON() ([]byte, error) { cloud := []string{} for _, cdc := range cdcs.cloud { cloud = append(cloud, cdc.Name) diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go index 71a6351445c..8917e756c51 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -12,10 +12,10 @@ const ( CustomizationApplyReasonSucceeded = "Succeeded" // CustomizationApplyReasonBrokenSyntax indicates that Hive failed to apply // customization patches on install-config. More details would be found in - // Valid condition message. + // ApplySucceded condition message. CustomizationApplyReasonBrokenSyntax = "BrokenBySyntax" // CustomizationApplyReasonBrokenCloud indicates that cluster deployment provision has failed - // when using this customization. More details would be found in the Valid condition message. + // when using this customization. More details would be found in the ApplySucceeded condition message. 
CustomizationApplyReasonBrokenCloud = "BrokenByCloud" // CustomizationApplyReasonInstallationPending indicates that the customization patches have // been successfully applied but provisioning is not completed yet. From 97986dcce46b78562401c46b1ecaac1e1abefcae Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Wed, 8 Jun 2022 14:47:04 +0300 Subject: [PATCH 13/27] e2e: save clusterpool and customizations --- hack/e2e-common.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hack/e2e-common.sh b/hack/e2e-common.sh index efb7b670959..605ad0b6b09 100755 --- a/hack/e2e-common.sh +++ b/hack/e2e-common.sh @@ -213,6 +213,8 @@ function capture_manifests() { oc get clusterstate -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterstate.yaml" || true oc get dnszone -A -o yaml &> "${ARTIFACT_DIR}/hive_dnszones.yaml" || true oc get machinepool -A -o yaml &> "${ARTIFACT_DIR}/hive_machinepools.yaml" || true + oc get clusterdeploymentcustomization -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterdeploymentcustomization.yaml" || true + oc get clusterpool -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterppol.yaml" || true # Don't get the contents of the secrets, since they're sensitive; hopefully just listing them will be helpful. oc get secrets -A &> "${ARTIFACT_DIR}/secret_list.txt" || true } From 15f98670b04a0f033f7a5c0e75d2c2a2683fd847 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Thu, 9 Jun 2022 09:10:33 +0300 Subject: [PATCH 14/27] e2e: fix typos --- hack/e2e-common.sh | 2 +- hack/e2e-pool-test.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hack/e2e-common.sh b/hack/e2e-common.sh index 605ad0b6b09..087bf419e01 100755 --- a/hack/e2e-common.sh +++ b/hack/e2e-common.sh @@ -214,7 +214,7 @@ function capture_manifests() { oc get dnszone -A -o yaml &> "${ARTIFACT_DIR}/hive_dnszones.yaml" || true oc get machinepool -A -o yaml &> "${ARTIFACT_DIR}/hive_machinepools.yaml" || true oc get clusterdeploymentcustomization -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterdeploymentcustomization.yaml" || true - oc get clusterpool -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterppol.yaml" || true + oc get clusterpool -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterpool.yaml" || true # Don't get the contents of the secrets, since they're sensitive; hopefully just listing them will be helpful. 
oc get secrets -A &> "${ARTIFACT_DIR}/secret_list.txt" || true } diff --git a/hack/e2e-pool-test.sh b/hack/e2e-pool-test.sh index 027a55a438f..8fca05c2dad 100755 --- a/hack/e2e-pool-test.sh +++ b/hack/e2e-pool-test.sh @@ -32,12 +32,12 @@ apiVersion: hive.openshift.io/v1 kind: ClusterDeploymentCustomization metadata: name: $is_name - name: $ns + namespace: $ns spec: installConfigPatches: - op: replace path: /metadata/name - value: cdc-test + value: cdc-test EOF } From ec54e1d495a378c8a752356ba06f38e4c515b200 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Tue, 14 Jun 2022 12:18:35 +0300 Subject: [PATCH 15/27] e2e: merge customization with original pool, revert unassigned to a list --- hack/e2e-pool-test.sh | 14 +++--- .../clusterpool/clusterpool_controller.go | 2 +- pkg/controller/clusterpool/collections.go | 45 +++++++++---------- 3 files changed, 29 insertions(+), 32 deletions(-) diff --git a/hack/e2e-pool-test.sh b/hack/e2e-pool-test.sh index 8fca05c2dad..3b42698c22b 100755 --- a/hack/e2e-pool-test.sh +++ b/hack/e2e-pool-test.sh @@ -214,6 +214,10 @@ go run "${SRC_ROOT}/contrib/cmd/hiveutil/main.go" clusterpool create-pool \ --size "${POOL_SIZE}" \ ${REAL_POOL_NAME} +# Add customization +create_customization "cdc-test" "${CLUSTER_NAMESPACE}" +oc patch cp -n $CLUSTER_NAMESPACE $REAL_POOL_NAME --type=merge -p '{"spec": {"inventory": [{"name": "cdc-test"}]}}' + ### INTERLUDE: FAKE POOL # The real cluster pool is going to take a while to become ready. While that # happens, create a fake pool and do some more testing. We'll use the real @@ -286,6 +290,9 @@ wait_for_pool_to_be_ready $FAKE_POOL_NAME # Wait for the real cluster pool to become ready (if it isn't yet) wait_for_pool_to_be_ready $REAL_POOL_NAME +# Test customization +verify_cluster_name $REAL_POOL_NAME "cdc-test" "cdc-test" + # Get the CD name & namespace (which should be the same) # TODO: Set this up for variable POOL_SIZE -- as written this would put # multiple results in CLUSTER_NAME; for >1 pool size we would not only @@ -311,11 +318,4 @@ set_power_state $CLUSTER_NAME Running wait_for_hibernation_state $CLUSTER_NAME Running -# Test customization -create_customization "cdc-test" "${CLUSTER_NAMESPACE}" -oc patch cp -n $CLUSTER_NAMESPACE $REAL_POOL_NAME --type=merge -p '{"spec": {"inventory": [{"name": "cdc-test"}]}}' -oc delete clusterclaim --all -wait_for_pool_to_be_ready $REAL_POOL_NAME -verify_cluster_name $REAL_POOL_NAME "cdc-test" "cdc-test" - # Let the cleanup trap do the cleanup. 
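verify_cluster_name's definition is not part of this excerpt; an equivalent manual spot-check (hypothetical commands with the same intent) reads the reservation off the customization's status and confirms the install-config name patch took effect:

    # Which ClusterDeployment did the customization get reserved for?
    oc get clusterdeploymentcustomization cdc-test -n "${CLUSTER_NAMESPACE}" \
      -o jsonpath='{.status.clusterDeploymentRef.name}{"\n"}'

    # The patch replaces /metadata/name with cdc-test, so the resulting
    # cluster (and its namespace) should be named cdc-test.
    oc get clusterdeployment -A | grep cdc-test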
diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index cbe8df00fd3..d1ca8605d1c 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -775,7 +775,7 @@ func (r *ReconcileClusterPool) createCluster( poolRef := poolReference(clp) cd.Spec.ClusterPoolRef = &poolRef if clp.Spec.Inventory != nil { - cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.next} + cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.unassigned[0].Name} } } else if secretTmp := isInstallConfigSecret(obj); secretTmp != nil { secret = secretTmp diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index a9dcbfc1143..6cdbdfb3af9 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -321,6 +321,7 @@ func getAllClusterDeploymentsForPool(c client.Client, pool *hivev1.ClusterPool, for _, entry := range pool.Spec.Inventory { if cdcName == entry.Name { customizationExists = true + break } } } @@ -615,7 +616,7 @@ func (cds *cdCollection) Delete(c client.Client, cdName string) error { type cdcCollection struct { // Unclaimed by any cluster pool CD and are not broken - unassigned map[string]*hivev1.ClusterDeploymentCustomization + unassigned []*hivev1.ClusterDeploymentCustomization // Missing CDC means listed in pool inventory but the custom resource doesn't exist in the pool namespace missing []string // Used by some cluster deployment @@ -628,8 +629,6 @@ type cdcCollection struct { byCDCName map[string]*hivev1.ClusterDeploymentCustomization // Namespace are all the CDC in the namespace mapped by name namespace map[string]*hivev1.ClusterDeploymentCustomization - // Next CDC to assign - next string } // getAllCustomizationsForPool is the constructor for a cdcCollection for all of the @@ -647,7 +646,7 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg } cdcCol := cdcCollection{ - unassigned: make(map[string]*hivev1.ClusterDeploymentCustomization), + unassigned: make([]*hivev1.ClusterDeploymentCustomization, 0), missing: make([]string, 0), reserved: make(map[string]*hivev1.ClusterDeploymentCustomization), cloud: make(map[string]*hivev1.ClusterDeploymentCustomization), @@ -664,7 +663,7 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg if cdc, ok := cdcCol.namespace[item.Name]; ok { cdcCol.byCDCName[item.Name] = cdc if cdRef := cdc.Status.ClusterDeploymentRef; cdRef == nil { - cdcCol.unassigned[item.Name] = cdc + cdcCol.unassigned = append(cdcCol.unassigned, cdc) } else { cdcCol.reserved[item.Name] = cdc } @@ -700,19 +699,14 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg // customization. When customizations have the same last apply status, the // oldest used customization will be prioritized. 
func (cdcs *cdcCollection) Sort() { - var unassigned []*hivev1.ClusterDeploymentCustomization - for _, cdc := range cdcs.unassigned { - unassigned = append(unassigned, cdc) - } - sort.Slice( - unassigned, + cdcs.unassigned, func(i, j int) bool { now := metav1.NewTime(time.Now()) - iStatus := conditionsv1.FindStatusCondition(unassigned[i].Status.Conditions, hivev1.ApplySucceededCondition) - jStatus := conditionsv1.FindStatusCondition(unassigned[j].Status.Conditions, hivev1.ApplySucceededCondition) - iName := unassigned[i].Name - jName := unassigned[j].Name + iStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[i].Status.Conditions, hivev1.ApplySucceededCondition) + jStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[j].Status.Conditions, hivev1.ApplySucceededCondition) + iName := cdcs.unassigned[i].Name + jName := cdcs.unassigned[j].Name if iStatus == nil || iStatus.Status == corev1.ConditionUnknown { iStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} iStatus.LastTransitionTime = now @@ -739,9 +733,6 @@ func (cdcs *cdcCollection) Sort() { return iName < jName }, ) - if len(unassigned) > 0 { - cdcs.next = unassigned[0].Name - } } func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cdName, poolName string) error { @@ -768,7 +759,13 @@ func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymen cdcs.reserved[cdc.Name] = cdc cdcs.byCDCName[cdc.Name] = cdc - delete(cdcs.unassigned, cdc.Name) + for i, cdci := range cdcs.unassigned { + if cdci.Name == cdc.Name { + copy(cdcs.unassigned[i:], cdcs.unassigned[i+1:]) + cdcs.unassigned = cdcs.unassigned[:len(cdcs.unassigned)-1] + break + } + } cdcs.Sort() return nil @@ -796,7 +793,7 @@ func (cdcs *cdcCollection) Unassign(c client.Client, cdc *hivev1.ClusterDeployme delete(cdcs.reserved, cdc.Name) - cdcs.unassigned[cdc.Name] = cdc + cdcs.unassigned = append(cdcs.unassigned, cdc) cdcs.Sort() return nil } @@ -877,7 +874,7 @@ func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.Clus return nil } -func (c *cdcCollection) Unassigned() map[string]*hivev1.ClusterDeploymentCustomization { +func (c *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { return c.unassigned } @@ -943,7 +940,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien // CDC exists cdc, ok := cdcs.namespace[cpRef.CustomizationRef.Name] if !ok { - logger.Warning("CD has reference to a CDC that doesn't exist, it was forceully removed or this is a bug") + logger.Warning("CD has reference to a CDC that doesn't exist, it was forcefully removed or this is a bug") continue } @@ -1003,7 +1000,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien "clusterdeployment": cd.Name, "clusterdeploymentcustomization": cdcRef.Name, "namespace": cd.Spec.ClusterPoolRef.Namespace, - }).Warning("CD has reference to a CDC that doesn't exist, it was forceully removed or this is a bug") + }).Warning("CD has reference to a CDC that doesn't exist, it was forcefully removed or this is a bug") } } @@ -1022,7 +1019,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien "clusterdeployment": cd.Name, "clusterdeploymentcustomization": cdcRef.Name, "namespace": cd.Spec.ClusterPoolRef.Namespace, - }).Warning("CD has reference to a CDC that doesn't exist, it was forceully removed or this is a bug") + }).Warning("CD has reference to a CDC that doesn't exist, it was forcefully removed 
or this is a bug") } } From a4158bd284a3df642df25a52524bdb7afbd303f8 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Sat, 18 Jun 2022 15:46:39 +0300 Subject: [PATCH 16/27] fix rebase --- go.sum | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go.sum b/go.sum index ce620292034..f6409b9e2b2 100644 --- a/go.sum +++ b/go.sum @@ -827,8 +827,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E= -github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= +github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e h1:Vzi98BTVyd/EHyrs3ZINMfeWg4u1dd6h07E0AHZVYIs= +github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= github.com/kulti/thelper v0.4.0 h1:2Nx7XbdbE/BYZeoip2mURKUdtHQRuy6Ug+wR7K9ywNM= github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU= From 8f4fb18c4052f0031719df1a11f09159bb641713 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Sat, 18 Jun 2022 18:26:18 +0300 Subject: [PATCH 17/27] e2e: customize REAL POOL after creation of FAKE POOL --- hack/e2e-pool-test.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/hack/e2e-pool-test.sh b/hack/e2e-pool-test.sh index 3b42698c22b..bb1e8c07fc7 100755 --- a/hack/e2e-pool-test.sh +++ b/hack/e2e-pool-test.sh @@ -214,10 +214,6 @@ go run "${SRC_ROOT}/contrib/cmd/hiveutil/main.go" clusterpool create-pool \ --size "${POOL_SIZE}" \ ${REAL_POOL_NAME} -# Add customization -create_customization "cdc-test" "${CLUSTER_NAMESPACE}" -oc patch cp -n $CLUSTER_NAMESPACE $REAL_POOL_NAME --type=merge -p '{"spec": {"inventory": [{"name": "cdc-test"}]}}' - ### INTERLUDE: FAKE POOL # The real cluster pool is going to take a while to become ready. While that # happens, create a fake pool and do some more testing. 
We'll use the real
@@ -227,6 +223,11 @@ FAKE_POOL_NAME=fake-pool
 oc get clusterpool ${REAL_POOL_NAME} -o json \
 	| jq '.spec.annotations["hive.openshift.io/fake-cluster"] = "true" | .metadata.name = "'${FAKE_POOL_NAME}'" | .spec.size = 4' \
 	| oc apply -f -
+
+# Add customization to REAL POOL
+create_customization "cdc-test" "${CLUSTER_NAMESPACE}"
+oc patch cp -n $CLUSTER_NAMESPACE $REAL_POOL_NAME --type=merge -p '{"spec": {"inventory": [{"name": "cdc-test"}]}}'
+
 wait_for_pool_to_be_ready $FAKE_POOL_NAME
 
 ## Test stale cluster replacement (HIVE-1058)

From 4fddbe79ae4cb9fc2a36c4e84b8bf51989c76f49 Mon Sep 17 00:00:00 2001
From: Alexander Braverman Masis
Date: Thu, 23 Jun 2022 13:48:46 +0300
Subject: [PATCH 18/27] e2e: random name for the new cluster

---
 hack/e2e-pool-test.sh | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/hack/e2e-pool-test.sh b/hack/e2e-pool-test.sh
index bb1e8c07fc7..4a22053acc5 100755
--- a/hack/e2e-pool-test.sh
+++ b/hack/e2e-pool-test.sh
@@ -26,6 +26,7 @@ EOF
 function create_customization() {
 	local is_name=$1
 	local ns=$2
+	local cname=$3
 	echo "Creating ClusterDeploymentCustomization $is_name"
 	oc apply -f -<<EOF
[...the rest of this patch was lost in extraction; per the diffstat it updates the heredoc (presumably "value: cdc-test" becoming "value: $cname") and its callers...]

From [commit hash lost in extraction] Mon Sep 17 00:00:00 2001
From: Alexander Braverman Masis
Date: Fri, 24 Jun 2022 15:03:24 +0300
Subject: [PATCH 19/27] fix indentation and fail Reserve if not changed

---
 hack/requirements.txt                     |  2 +-
 .../clusterpool/clusterpool_controller.go | 24 ++++++++++---------
 pkg/controller/clusterpool/collections.go |  8 ++++---
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/hack/requirements.txt b/hack/requirements.txt
index 0ac282c0723..38a508ea6f9 100644
--- a/hack/requirements.txt
+++ b/hack/requirements.txt
@@ -1,3 +1,3 @@
 GitPython
 PyYAML>=6.0
-yq
\ No newline at end of file
+yq>=4.0
diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go
index d1ca8605d1c..2378eb0962d 100644
--- a/pkg/controller/clusterpool/clusterpool_controller.go
+++ b/pkg/controller/clusterpool/clusterpool_controller.go
@@ -199,18 +199,20 @@ func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.Ma
 	var requests []reconcile.Request
 	for _, cpl := range cpList.Items {
-		if cpl.Spec.Inventory != nil {
-			for _, entry := range cpl.Spec.Inventory {
-				if entry.Name == cdc.Name {
-					requests = append(requests, reconcile.Request{
-						NamespacedName: types.NamespacedName{
-							Namespace: cpl.Namespace,
-							Name:      cpl.Name,
-						},
-					})
-					break
-				}
+		if cpl.Spec.Inventory == nil {
+			continue
+		}
+		for _, entry := range cpl.Spec.Inventory {
+			if entry.Name != cdc.Name {
+				continue
 			}
+			requests = append(requests, reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Namespace: cpl.Namespace,
+					Name:      cpl.Name,
+				},
+			})
+			break
 		}
 	}
 
diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go
index 6cdbdfb3af9..2aa0fd5e906 100644
--- a/pkg/controller/clusterpool/collections.go
+++ b/pkg/controller/clusterpool/collections.go
@@ -740,9 +740,6 @@ func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymen
 		return errors.New("ClusterDeploymentCustomization already reserved")
 	}
 
-	cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cdName}
-	cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: poolName}
-
 	changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
 		Type:   conditionsv1.ConditionAvailable,
 		Status: corev1.ConditionFalse,
@@ -754,8 +751,13 @@
if err := c.Status().Update(context.Background(), cdc); err != nil { return err } + } else { + return errors.New("ClusterDeploymentCustomization already reserved") } + cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cdName} + cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: poolName} + cdcs.reserved[cdc.Name] = cdc cdcs.byCDCName[cdc.Name] = cdc From cf5e4e0c59cd722a3a165f894b01af104cd5d9a8 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Tue, 5 Jul 2022 21:24:11 +0300 Subject: [PATCH 20/27] Add support for ovirt and vshpere --- pkg/clusterresource/ovirt.go | 7 +++ pkg/clusterresource/vsphere.go | 9 ++++ .../clusterpool/clusterpool_controller.go | 46 ++++++++++++++++++- 3 files changed, 61 insertions(+), 1 deletion(-) diff --git a/pkg/clusterresource/ovirt.go b/pkg/clusterresource/ovirt.go index db55536b533..e435f3da8a5 100644 --- a/pkg/clusterresource/ovirt.go +++ b/pkg/clusterresource/ovirt.go @@ -38,6 +38,13 @@ type OvirtCloudBuilder struct { CACert []byte } +func NewOvirtCloudBuilderFromSecret(credsSecret *corev1.Secret) *OvirtCloudBuilder { + ovirtConfigYamlContent := credsSecret.Data[constants.OvirtCredentialsName] + return &OvirtCloudBuilder{ + OvirtConfig: ovirtConfigYamlContent, + } +} + func (p *OvirtCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/clusterresource/vsphere.go b/pkg/clusterresource/vsphere.go index 3b0c6cbf97a..eb2aaca8a56 100644 --- a/pkg/clusterresource/vsphere.go +++ b/pkg/clusterresource/vsphere.go @@ -54,6 +54,15 @@ type VSphereCloudBuilder struct { CACert []byte } +func NewVSphereCloudBuilderFromSecret(credsSecret *corev1.Secret) *VSphereCloudBuilder { + username := credsSecret.Data[constants.UsernameSecretKey] + password := credsSecret.Data[constants.PasswordSecretKey] + return &VSphereCloudBuilder{ + Username: string(username), + Password: string(password), + } +} + func (p *VSphereCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 2378eb0962d..fa2bb55c691 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -1160,7 +1160,51 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg cloudBuilder := clusterresource.NewOpenStackCloudBuilderFromSecret(credsSecret) cloudBuilder.Cloud = platform.OpenStack.Cloud return cloudBuilder, nil - // TODO: VMware, and Ovirt. 
+ case platform.VSphere != nil: + credsSecret, err := r.getCredentialsSecret(pool, platform.VSphere.CredentialsSecretRef.Name, logger) + if err != nil { + return nil, err + } + + certsSecret, err := r.getCredentialsSecret(pool, platform.VSphere.CertificatesSecretRef.Name, logger) + if err != nil { + return nil, err + } + + if _, ok := certsSecret.Data[".cacert"]; !ok { + return nil, errors.New("vSphere certificates secret is missing the '.cacert' key") + } + + cloudBuilder := clusterresource.NewVSphereCloudBuilderFromSecret(credsSecret) + cloudBuilder.Datacenter = platform.VSphere.Datacenter + cloudBuilder.DefaultDatastore = platform.VSphere.DefaultDatastore + cloudBuilder.VCenter = platform.VSphere.VCenter + cloudBuilder.Cluster = platform.VSphere.Cluster + cloudBuilder.Folder = platform.VSphere.Folder + cloudBuilder.Network = platform.VSphere.Network + + return cloudBuilder, nil + case platform.Ovirt != nil: + credsSecret, err := r.getCredentialsSecret(pool, platform.Ovirt.CredentialsSecretRef.Name, logger) + if err != nil { + return nil, err + } + + certsSecret, err := r.getCredentialsSecret(pool, platform.Ovirt.CertificatesSecretRef.Name, logger) + if err != nil { + return nil, err + } + + if _, ok := certsSecret.Data[".cacert"]; !ok { + return nil, errors.New("oVirt certificates secret is missing the '.cacert' key") + } + + cloudBuilder := clusterresource.NewOvirtCloudBuilderFromSecret(credsSecret) + cloudBuilder.StorageDomainID = platform.Ovirt.StorageDomainID + cloudBuilder.ClusterID = platform.Ovirt.ClusterID + cloudBuilder.NetworkName = platform.Ovirt.NetworkName + + return cloudBuilder, nil default: logger.Info("unsupported platform") return nil, errors.New("unsupported platform") From 8fa20e2820b6d761a63e2d0549c90578c005c172 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Mon, 25 Jul 2022 11:18:40 +0300 Subject: [PATCH 21/27] Remove hibernation change, update roles, fix cdc deletion process and fix vsphere --- config/rbac/hive_admin_role.yaml | 2 ++ config/rbac/hive_reader_role.yaml | 1 + pkg/clusterresource/vsphere.go | 4 +++- .../clusterpool/clusterpool_controller.go | 12 +++++++++++- pkg/controller/clusterpool/collections.go | 17 +++++++++++++---- .../hibernation/hibernation_controller.go | 8 +------- .../hibernation/hibernation_controller_test.go | 11 +++++++++-- pkg/operator/assets/bindata.go | 3 +++ 8 files changed, 43 insertions(+), 15 deletions(-) diff --git a/config/rbac/hive_admin_role.yaml b/config/rbac/hive_admin_role.yaml index cc7a0b075f3..d5a5ea8a560 100644 --- a/config/rbac/hive_admin_role.yaml +++ b/config/rbac/hive_admin_role.yaml @@ -37,6 +37,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates @@ -51,6 +52,7 @@ rules: - hiveconfigs - selectorsyncsets - selectorsyncidentityproviders + - clusterdeploymentcustomizations verbs: - get - list diff --git a/config/rbac/hive_reader_role.yaml b/config/rbac/hive_reader_role.yaml index 4cc8fa37ee8..b5ae7a48885 100644 --- a/config/rbac/hive_reader_role.yaml +++ b/config/rbac/hive_reader_role.yaml @@ -37,6 +37,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates diff --git a/pkg/clusterresource/vsphere.go b/pkg/clusterresource/vsphere.go index eb2aaca8a56..fcf823aa07f 100644 --- a/pkg/clusterresource/vsphere.go +++ b/pkg/clusterresource/vsphere.go @@ -54,12 +54,14 @@ type VSphereCloudBuilder struct { CACert []byte } -func NewVSphereCloudBuilderFromSecret(credsSecret
*corev1.Secret) *VSphereCloudBuilder { +func NewVSphereCloudBuilderFromSecret(credsSecret, certsSecret *corev1.Secret) *VSphereCloudBuilder { username := credsSecret.Data[constants.UsernameSecretKey] password := credsSecret.Data[constants.PasswordSecretKey] + cacert := certsSecret.Data[".cacert"] return &VSphereCloudBuilder{ Username: string(username), Password: string(password), + CACert: cacert, } } diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index fa2bb55c691..5c42f6bc513 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -299,6 +299,10 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, err } + if p := clp.Spec.Platform; clp.Spec.RunningCount != clp.Spec.Size && (p.OpenStack != nil || p.Ovirt != nil || p.VSphere != nil) { + return reconcile.Result{}, errors.New("Hibernation is not supported on OpenStack, vSphere and oVirt, unless runningCount == size") + } + // Initialize cluster pool conditions if not set newConditions, changed := controllerutils.InitializeClusterPoolConditions(clp.Status.Conditions, clusterPoolConditions) if changed { @@ -973,6 +977,12 @@ func (r *ReconcileClusterPool) reconcileDeletedPool(pool *hivev1.ClusterPool, lo return errors.Wrap(err, "could not delete ClusterDeployment") } } + cdcs, err := getAllCustomizationsForPool(r.Client, pool, logger) + if err != nil { + return err + } + // If the CDC is shared by other clusterpools, this deletion should trigger those clusterpools to reconcile and restore the finalizer + cdcs.Release(pool) // TODO: Wait to remove finalizer until all (unclaimed??) clusters are gone. controllerutils.DeleteFinalizer(pool, finalizer) if err := r.Update(context.Background(), pool); err != nil { @@ -1175,7 +1185,7 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg return nil, errors.New("vSphere certificates secret is missing the '.cacert' key") } - cloudBuilder := clusterresource.NewVSphereCloudBuilderFromSecret(credsSecret) + cloudBuilder := clusterresource.NewVSphereCloudBuilderFromSecret(credsSecret, certsSecret) cloudBuilder.Datacenter = platform.VSphere.Datacenter cloudBuilder.DefaultDatastore = platform.VSphere.DefaultDatastore cloudBuilder.VCenter = platform.VSphere.VCenter diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 2aa0fd5e906..ae593960084 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -662,10 +662,11 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg for _, item := range pool.Spec.Inventory { if cdc, ok := cdcCol.namespace[item.Name]; ok { cdcCol.byCDCName[item.Name] = cdc - if cdRef := cdc.Status.ClusterDeploymentRef; cdRef == nil { - cdcCol.unassigned = append(cdcCol.unassigned, cdc) - } else { + availability := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if availability != nil && availability.Status == corev1.ConditionFalse { cdcCol.reserved[item.Name] = cdc + } else { + cdcCol.unassigned = append(cdcCol.unassigned, cdc) } applyStatus := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) if applyStatus == nil { @@ -880,6 +881,14 @@ func (c *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { return c.unassigned } +func (c *cdcCollection) Release(pool *hivev1.ClusterPool) { + for _, item := range
pool.Spec.Inventory { + if cdc, ok := c.namespace[item.Name]; ok { + controllerutils.DeleteFinalizer(cdc, finalizer) + } + } +} + // SyncClusterDeploymentCustomizations updates CDCs and related CR status: // - Handle deletion of CDC in the namespace // - If there is no CD, but CDC is reserved, then we release the CDC @@ -918,7 +927,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien // If there is no CD, but CDC is reserved, then we release the CDC for _, cdc := range cdcs.reserved { - if cds.ByName(cdc.Status.ClusterDeploymentRef.Name) == nil { + if ref := cdc.Status.ClusterDeploymentRef; ref == nil || cds.ByName(ref.Name) == nil { if err := cdcs.Unassign(c, cdc); err != nil { return err } diff --git a/pkg/controller/hibernation/hibernation_controller.go b/pkg/controller/hibernation/hibernation_controller.go index f3eb9190041..f0882fcbb5e 100644 --- a/pkg/controller/hibernation/hibernation_controller.go +++ b/pkg/controller/hibernation/hibernation_controller.go @@ -222,10 +222,7 @@ func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile // set hibernating condition to false for unsupported clouds changed := r.setCDCondition(cd, hivev1.ClusterHibernatingCondition, hivev1.HibernatingReasonUnsupported, msg, corev1.ConditionFalse, cdLog) - rChanged := r.setCDCondition(cd, hivev1.ClusterReadyCondition, hivev1.ReadyReasonRunning, clusterRunningMsg, - corev1.ConditionTrue, cdLog) - if changed || rChanged { - cd.Status.PowerState = hivev1.ClusterPowerStateRunning + if changed { return reconcile.Result{}, r.updateClusterDeploymentStatus(cd, cdLog) } } else if hibernatingCondition.Reason == hivev1.HibernatingReasonUnsupported { @@ -910,9 +907,6 @@ func shouldStopMachines(cd *hivev1.ClusterDeployment, hibernatingCondition *hive // shouldStartMachines decides if machines should be started func shouldStartMachines(cd *hivev1.ClusterDeployment, hibernatingCondition *hivev1.ClusterDeploymentCondition, readyCondition *hivev1.ClusterDeploymentCondition) bool { - if hibernatingCondition.Reason == hivev1.HibernatingReasonUnsupported { - return true - } if cd.Spec.PowerState == hivev1.ClusterPowerStateHibernating { return false } diff --git a/pkg/controller/hibernation/hibernation_controller_test.go b/pkg/controller/hibernation/hibernation_controller_test.go index 67c3495891a..4bc77eb7677 100644 --- a/pkg/controller/hibernation/hibernation_controller_test.go +++ b/pkg/controller/hibernation/hibernation_controller_test.go @@ -118,7 +118,14 @@ func TestReconcile(t *testing.T) { Message: "Unsupported version, need version 4.4.8 or greater"})).Build(), cs: csBuilder.Build(), setupActuator: func(actuator *mock.MockHibernationActuator) { - actuator.EXPECT().CanHandle(gomock.Any()).AnyTimes().Return(false) + actuator.EXPECT().MachinesRunning(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(true, nil, nil) + }, + setupRemote: func(builder *remoteclientmock.MockBuilder) { + objs := []runtime.Object{} + objs = append(objs, readyNodes()...) + objs = append(objs, readyClusterOperators()...) + c := fake.NewFakeClientWithScheme(scheme, objs...) 
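+			// hand the fake remote-cluster client (seeded above with ready
+			// nodes and ClusterOperators) to the mocked remote client builder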
+ builder.EXPECT().Build().Times(1).Return(c, nil) }, validate: func(t *testing.T, cd *hivev1.ClusterDeployment) { cond, runCond := getHibernatingAndRunningConditions(cd) @@ -936,10 +943,10 @@ func TestReconcile(t *testing.T) { t.Run(test.name, func(t *testing.T) { ctrl := gomock.NewController(t) mockActuator := mock.NewMockHibernationActuator(ctrl) + mockActuator.EXPECT().CanHandle(gomock.Any()).AnyTimes().Return(true) if test.setupActuator != nil { test.setupActuator(mockActuator) } - mockActuator.EXPECT().CanHandle(gomock.Any()).AnyTimes().Return(true) mockBuilder := remoteclientmock.NewMockBuilder(ctrl) if test.setupRemote != nil { test.setupRemote(mockBuilder) diff --git a/pkg/operator/assets/bindata.go b/pkg/operator/assets/bindata.go index e22ae4d68b5..cbbc1187782 100644 --- a/pkg/operator/assets/bindata.go +++ b/pkg/operator/assets/bindata.go @@ -1125,6 +1125,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates @@ -1139,6 +1140,7 @@ rules: - hiveconfigs - selectorsyncsets - selectorsyncidentityproviders + - clusterdeploymentcustomizations verbs: - get - list @@ -1508,6 +1510,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates From 229237f003b2c66ff7753ec1db83b31b7c0c631f Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Tue, 26 Jul 2022 10:02:38 +0300 Subject: [PATCH 22/27] Unique finalizer per clusterpool --- .../clusterpool/clusterpool_controller.go | 6 ++++-- pkg/controller/clusterpool/collections.go | 16 ++++++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 5c42f6bc513..faf4e386bff 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -977,12 +977,14 @@ func (r *ReconcileClusterPool) reconcileDeletedPool(pool *hivev1.ClusterPool, lo return errors.Wrap(err, "could not delete ClusterDeployment") } } + cdcs, err := getAllCustomizationsForPool(r.Client, pool, logger) if err != nil { return err } - // If the CDC is shared by other clusterpools, this deletion should trigger those clusterpools to reconcile and restore the finalizer - cdcs.Release(pool) + + cdcs.RemoveFinalizer(pool) + // TODO: Wait to remove finalizer until all (unclaimed??) clusters are gone.
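+	// the pool's own finalizer below is removed only after the inventory
+	// CDC finalizers above have been released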
controllerutils.DeleteFinalizer(pool, finalizer) if err := r.Update(context.Background(), pool); err != nil { diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index ae593960084..871d496e237 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -881,10 +881,12 @@ func (c *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { return c.unassigned } -func (c *cdcCollection) Release(pool *hivev1.ClusterPool) { +func (c *cdcCollection) RemoveFinalizer(pool *hivev1.ClusterPool) { + poolFinalizer := fmt.Sprintf("hive.openshift.io/clusterpools/%s", pool.Name) + for _, item := range pool.Spec.Inventory { - if cdc, ok := c.namespace[item.Name]; ok { - controllerutils.DeleteFinalizer(cdc, finalizer) + if cdc, ok := c.byCDCName[item.Name]; ok { + controllerutils.DeleteFinalizer(cdc, poolFinalizer) } } } @@ -901,15 +903,17 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien return nil } + poolFinalizer := fmt.Sprintf("hive.openshift.io/clusterpools/%s", pool.Name) + // Handle deletion of CDC in the namespace for _, cdc := range cdcs.namespace { isDeleted := cdc.DeletionTimestamp != nil - hasFinalizer := controllerutils.HasFinalizer(cdc, finalizer) + hasFinalizer := controllerutils.HasFinalizer(cdc, poolFinalizer) isAvailable := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) if isDeleted && (isAvailable == nil || isAvailable.Status != corev1.ConditionFalse) { // We can delete the finalizer for a deleted CDC only if it is not reserved if hasFinalizer { - controllerutils.DeleteFinalizer(cdc, finalizer) + controllerutils.DeleteFinalizer(cdc, poolFinalizer) if err := c.Update(context.Background(), cdc); err != nil { return err } @@ -917,7 +921,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien } else { // Ensure the finalizer is present if the CDC is not deleted, OR if it is reserved if !hasFinalizer { - controllerutils.AddFinalizer(cdc, finalizer) + controllerutils.AddFinalizer(cdc, poolFinalizer) if err := c.Update(context.Background(), cdc); err != nil { return err } From 47161f73d34e693407c43e18ee505e2ac1e28e23 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Tue, 26 Jul 2022 10:27:36 +0300 Subject: [PATCH 23/27] Remove unsupported hibernation check and fix rebase --- go.mod | 2 +- .../clusterpool/clusterpool_controller.go | 4 +- .../github.com/krishicks/yaml-patch/README.md | 72 ++++++++++++++++++- .../krishicks/yaml-patch/container.go | 24 +++++-- .../github.com/krishicks/yaml-patch/node.go | 34 ++++++++- .../krishicks/yaml-patch/operation.go | 35 +++++---- .../github.com/krishicks/yaml-patch/patch.go | 71 +++++++++++------- .../krishicks/yaml-patch/pathfinder.go | 66 +++++++++-------- vendor/modules.txt | 2 +- 9 files changed, 231 insertions(+), 79 deletions(-) diff --git a/go.mod b/go.mod index 475c45c68be..07afc28f21e 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/heptio/velero v1.0.0 github.com/jonboulle/clockwork v0.2.2 github.com/json-iterator/go v1.1.12 - github.com/krishicks/yaml-patch v0.0.10 + github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e github.com/miekg/dns v1.1.35 github.com/modern-go/reflect2 v1.0.2 github.com/onsi/ginkgo v1.16.5 diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index faf4e386bff..0657eee9e77 100644 --- 
a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -507,10 +507,8 @@ func (r *ReconcileClusterPool) reconcileRunningClusters( ) for i := 0; i < len(cdList); i++ { cd := cdList[i] - hibernateCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.ClusterHibernatingCondition) - hibernateUnsupported := hibernateCondition != nil && hibernateCondition.Reason == hivev1.HibernatingReasonUnsupported var desiredPowerState hivev1.ClusterPowerState - if i < runningCount || hibernateUnsupported { + if i < runningCount { desiredPowerState = hivev1.ClusterPowerStateRunning } else { desiredPowerState = hivev1.ClusterPowerStateHibernating diff --git a/vendor/github.com/krishicks/yaml-patch/README.md b/vendor/github.com/krishicks/yaml-patch/README.md index a782e6397c9..8cc7b4b2b64 100644 --- a/vendor/github.com/krishicks/yaml-patch/README.md +++ b/vendor/github.com/krishicks/yaml-patch/README.md @@ -1,9 +1,79 @@ # yaml-patch +### **Note: This repo is for all intents and purposes abandoned. I would suggest using [ytt](https://get-ytt.io/) instead!** + `yaml-patch` is a version of Evan Phoenix's [json-patch](https://github.com/evanphx/json-patch), which is an implementation of [JavaScript Object Notation (JSON) Patch](https://tools.ietf.org/html/rfc6902), -but for YAML. +directly transposed to YAML. + + +## Syntax + +General syntax is the following: + +```yaml +- op: + from: # only valid for the 'move' and 'copy' operations + path: # always mandatory + value: # only valid for 'add', 'replace' and 'test' operations +``` + +### Paths + +Supported YAML paths are primarily those of +[RFC 6901 JSON Pointers](https://tools.ietf.org/html/rfc6901). + +A syntax extension with `=` was added to match any sub-element in a YAML +structure by key/value. + +For example, the following removes all sub-nodes of the `releases` array that +have a `name` key with a value of `cassandra`: + +```yaml +- op: remove + path: /releases/name=cassandra +``` + +A major caveat with `=` is that it actually performs a _recursive_ search for +matching nodes. The root node at which the recursive search is initiated, is +the node matched by the path prefix before `=`. + +The second caveat is that the recursion stops at a matching node. With the +`add` operation, you could expect sub-nodes of matching nodes to also match, +but they don't. + +If your document is the following and you apply the patch above, then all +sub-nodes of `/releases` that match `name=cassandra` will be removed. + +```yaml +releases: # a recursive search is made, starting from this node + - name: cassandra # does match, will be removed + - - name: toto + - name: cassandra # does match, will be removed! + sub: + - name: cassandra # not matched: the recursion stops at matching parent node + - super: + sub: + name: cassandra # does match, will be removed! +``` + +#### Path Escaping + +As in RFC 6901, escape sequences are introduced by `~`. So, `~` is escaped +`~0`, `/` is escaped `~1`. There is no escape for `=` yet. + + +### Operations + +Supported patch operations are those of [RFC 6902](https://tools.ietf.org/html/rfc6902).
+ +- [`add`](https://tools.ietf.org/html/rfc6902#section-4.1) +- [`remove`](https://tools.ietf.org/html/rfc6902#section-4.2) +- [`replace`](https://tools.ietf.org/html/rfc6902#section-4.3) +- [`move`](https://tools.ietf.org/html/rfc6902#section-4.4) +- [`copy`](https://tools.ietf.org/html/rfc6902#section-4.5) +- [`test`](https://tools.ietf.org/html/rfc6902#section-4.6) ## Installing diff --git a/vendor/github.com/krishicks/yaml-patch/container.go b/vendor/github.com/krishicks/yaml-patch/container.go index bdc22f143de..4a5b275aea9 100644 --- a/vendor/github.com/krishicks/yaml-patch/container.go +++ b/vendor/github.com/krishicks/yaml-patch/container.go @@ -8,20 +8,36 @@ import ( // Container is the interface for performing operations on Nodes type Container interface { - Get(key string) (*Node, error) - Set(key string, val *Node) error - Add(key string, val *Node) error - Remove(key string) error + Get(keyOrIndex string) (*Node, error) + Set(keyOrIndex string, val *Node) error + Add(keyOrIndex string, val *Node) error + Remove(keyOrIndex string) error } type nodeMap map[interface{}]*Node +func (n *nodeMap) setAtRoot(val *Node) error { + switch vt := val.Container().(type) { + case *nodeMap: + for k, v := range *vt { + (*n)[k] = v + } + } + return nil +} + func (n *nodeMap) Set(key string, val *Node) error { + if len(key) == 0 { + return n.setAtRoot(val) + } (*n)[key] = val return nil } func (n *nodeMap) Add(key string, val *Node) error { + if len(key) == 0 { + return n.setAtRoot(val) + } (*n)[key] = val return nil } diff --git a/vendor/github.com/krishicks/yaml-patch/node.go b/vendor/github.com/krishicks/yaml-patch/node.go index 4837c8a983e..ae790f0cec7 100644 --- a/vendor/github.com/krishicks/yaml-patch/node.go +++ b/vendor/github.com/krishicks/yaml-patch/node.go @@ -1,6 +1,8 @@ package yamlpatch -import "reflect" +import ( + "reflect" +) // Node holds a YAML document that has not yet been processed into a NodeMap or // NodeSlice @@ -16,9 +18,33 @@ func NewNode(raw *interface{}) *Node { } } +// NewNodeFromMap returns a new Node based on a map[interface{}]interface{} +func NewNodeFromMap(m map[interface{}]interface{}) *Node { + var raw interface{} + raw = m + + return &Node{ + raw: &raw, + } +} + +// NewNodeFromSlice returns a new Node based on a []interface{} +func NewNodeFromSlice(s []interface{}) *Node { + var raw interface{} + raw = s + + return &Node{ + raw: &raw, + } +} + // MarshalYAML implements yaml.Marshaler, and returns the correct interface{} // to be marshaled func (n *Node) MarshalYAML() (interface{}, error) { + if n == nil { + return nil, nil + } + if n.container != nil { return n.container, nil } @@ -41,7 +67,7 @@ func (n *Node) UnmarshalYAML(unmarshal func(interface{}) error) error { // Empty returns whether the raw value is nil func (n *Node) Empty() bool { - return *n.raw == nil + return n == nil || *n.raw == nil } // Container returns the node as a Container @@ -74,6 +100,10 @@ func (n *Node) Container() Container { // Equal compares the values of the raw interfaces that the YAML was // unmarshaled into func (n *Node) Equal(other *Node) bool { + if n == nil { + return other == nil + } + return reflect.DeepEqual(*n.raw, *other.raw) } diff --git a/vendor/github.com/krishicks/yaml-patch/operation.go b/vendor/github.com/krishicks/yaml-patch/operation.go index 69353c77d6d..8575076dba0 100644 --- a/vendor/github.com/krishicks/yaml-patch/operation.go +++ b/vendor/github.com/krishicks/yaml-patch/operation.go @@ -11,18 +11,23 @@ type Op string // Ops const ( - opAdd Op = "add" - 
opRemove Op = "remove" - opReplace Op = "replace" - opMove Op = "move" - opCopy Op = "copy" - opTest Op = "test" + OpAdd Op = "add" + OpRemove Op = "remove" + OpReplace Op = "replace" + OpMove Op = "move" + OpCopy Op = "copy" + OpTest Op = "test" +) + +const ( + rootPath = "/" ) // OpPath is an RFC6902 'pointer' type OpPath string // Decompose returns the pointer's components: +// "/" => [], "" // "/foo" => [], "foo" // "/foo/1" => ["foo"], "1" // "/foo/1/bar" => ["foo", "1"], "bar" @@ -64,17 +69,17 @@ func (o *Operation) Perform(c Container) error { var err error switch o.Op { - case opAdd: + case OpAdd: err = tryAdd(c, o) - case opRemove: + case OpRemove: err = tryRemove(c, o) - case opReplace: + case OpReplace: err = tryReplace(c, o) - case opMove: + case OpMove: err = tryMove(c, o) - case opCopy: + case OpCopy: err = tryCopy(c, o) - case opTest: + case OpTest: err = tryTest(c, o) default: err = fmt.Errorf("Unexpected op: %s", o.Op) @@ -84,6 +89,10 @@ func (o *Operation) Perform(c Container) error { } func tryAdd(doc Container, op *Operation) error { + if op.Path == rootPath { + return doc.Add("", op.Value) + } + con, key, err := findContainer(doc, &op.Path) if err != nil { return fmt.Errorf("yamlpatch add operation does not apply: doc is missing path: %s", op.Path) @@ -169,7 +178,7 @@ func tryTest(doc Container, op *Operation) error { return err } - if op.Value.Empty() && val == nil { + if op.Value == nil && val.Empty() { return nil } diff --git a/vendor/github.com/krishicks/yaml-patch/patch.go b/vendor/github.com/krishicks/yaml-patch/patch.go index 910f39eb952..ec8f11c8346 100644 --- a/vendor/github.com/krishicks/yaml-patch/patch.go +++ b/vendor/github.com/krishicks/yaml-patch/patch.go @@ -1,7 +1,9 @@ package yamlpatch import ( + "bytes" "fmt" + "io" yaml "gopkg.in/yaml.v2" ) @@ -23,38 +25,59 @@ func DecodePatch(bs []byte) (Patch, error) { // Apply returns a YAML document that has been mutated per the patch func (p Patch) Apply(doc []byte) ([]byte, error) { - var iface interface{} - err := yaml.Unmarshal(doc, &iface) - if err != nil { - return nil, fmt.Errorf("failed unmarshaling doc: %s\n\n%s", string(doc), err) - } + decoder := yaml.NewDecoder(bytes.NewReader(doc)) + buf := bytes.NewBuffer([]byte{}) + encoder := yaml.NewEncoder(buf) - var c Container - c = NewNode(&iface).Container() - - for _, op := range p { - pathfinder := NewPathFinder(c) - if op.Path.ContainsExtendedSyntax() { - paths := pathfinder.Find(string(op.Path)) - if paths == nil { - return nil, fmt.Errorf("could not expand pointer: %s", op.Path) + for { + var iface interface{} + err := decoder.Decode(&iface) + if err != nil { + if err == io.EOF { + break } - for _, path := range paths { - newOp := op - newOp.Path = OpPath(path) - err = newOp.Perform(c) + return nil, fmt.Errorf("failed to decode doc: %s\n\n%s", string(doc), err) + } + + var c Container + c = NewNode(&iface).Container() + + for _, op := range p { + pathfinder := NewPathFinder(c) + if op.Path.ContainsExtendedSyntax() { + paths := pathfinder.Find(string(op.Path)) + if paths == nil { + return nil, fmt.Errorf("could not expand pointer: %s", op.Path) + } + + for i := len(paths) - 1; i >= 0; i-- { + path := paths[i] + newOp := op + newOp.Path = OpPath(path) + err := newOp.Perform(c) + if err != nil { + return nil, err + } + } + } else { + err := op.Perform(c) if err != nil { return nil, err } } - } else { - err = op.Perform(c) - if err != nil { - return nil, err - } } + + err = encoder.Encode(c) + if err != nil { + return nil, fmt.Errorf("failed to encode 
container: %s", err) + } + } + + err := encoder.Close() + if err != nil { + return nil, err } - return yaml.Marshal(c) + return buf.Bytes(), nil } diff --git a/vendor/github.com/krishicks/yaml-patch/pathfinder.go b/vendor/github.com/krishicks/yaml-patch/pathfinder.go index 06cfb133347..3a4f27aab81 100644 --- a/vendor/github.com/krishicks/yaml-patch/pathfinder.go +++ b/vendor/github.com/krishicks/yaml-patch/pathfinder.go @@ -3,6 +3,8 @@ package yamlpatch import ( "fmt" "strings" + + yaml "gopkg.in/yaml.v2" ) // PathFinder can be used to find RFC6902-standard paths given non-standard @@ -19,6 +21,11 @@ func NewPathFinder(container Container) *PathFinder { } } +type route struct { + key string + value Container +} + // Find expands the given path into all matching paths, returning the canonical // versions of those matching paths func (p *PathFinder) Find(path string) []string { @@ -28,8 +35,8 @@ func (p *PathFinder) Find(path string) []string { return []string{"/"} } - routes := map[string]Container{ - "": p.root, + routes := []route { + route{"", p.root}, } for _, part := range parts[1:] { @@ -37,73 +44,72 @@ func (p *PathFinder) Find(path string) []string { } var paths []string - for k := range routes { - paths = append(paths, k) + for _, r := range routes { + paths = append(paths, r.key) } return paths } -func find(part string, routes map[string]Container) map[string]Container { - matches := map[string]Container{} +func find(part string, routes []route) (matches []route) { + for _, r := range routes { + prefix := r.key + container := r.value - for prefix, container := range routes { if part == "-" { - for k := range routes { - matches[fmt.Sprintf("%s/-", k)] = routes[k] + for _, r = range routes { + matches = append(matches, route {fmt.Sprintf("%s/-", r.key), r.value}) } - return matches + return } if kv := strings.Split(part, "="); len(kv) == 2 { - if newMatches := findAll(prefix, kv[0], kv[1], container); len(newMatches) > 0 { + decoder := yaml.NewDecoder(strings.NewReader(kv[1])) + var value interface{} + if decoder.Decode(&value) != nil { + value = kv[1] + } + + if newMatches := findAll(prefix, kv[0], value, container); len(newMatches) > 0 { matches = newMatches } continue } - if node, err := container.Get(part); err == nil { + if node, err := container.Get(part); err == nil && node != nil { path := fmt.Sprintf("%s/%s", prefix, part) - if node == nil { - matches[path] = container - } else { - matches[path] = node.Container() - } + matches = append(matches, route {path, node.Container()}) } } - return matches + return } -func findAll(prefix, findKey, findValue string, container Container) map[string]Container { +func findAll(prefix, findKey string, findValue interface{}, container Container) (matches []route) { if container == nil { return nil } if v, err := container.Get(findKey); err == nil && v != nil { - if vs, ok := v.Value().(string); ok && vs == findValue { - return map[string]Container{ - prefix: container, - } + if v.Value() == findValue { + return []route {route{prefix, container}} } } - matches := map[string]Container{} - switch it := container.(type) { case *nodeMap: for k, v := range *it { - for route, match := range findAll(fmt.Sprintf("%s/%s", prefix, k), findKey, findValue, v.Container()) { - matches[route] = match + for _, r := range findAll(fmt.Sprintf("%s/%s", prefix, k), findKey, findValue, v.Container()) { + matches = append(matches, r) } } case *nodeSlice: for i, v := range *it { - for route, match := range findAll(fmt.Sprintf("%s/%d", prefix, i), findKey, 
findValue, v.Container()) { - matches[route] = match + for _, r := range findAll(fmt.Sprintf("%s/%d", prefix, i), findKey, findValue, v.Container()) { + matches = append(matches, r) } } } - return matches + return } diff --git a/vendor/modules.txt b/vendor/modules.txt index 39fcfa387df..d8c65f9c2ad 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -681,7 +681,7 @@ github.com/kisielk/gotool github.com/kisielk/gotool/internal/load # github.com/kr/pty v1.1.8 ## explicit; go 1.12 -# github.com/krishicks/yaml-patch v0.0.10 +# github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e ## explicit github.com/krishicks/yaml-patch # github.com/kulti/thelper v0.4.0 From af52c84ad2f62ebc5044633e98481ccf9943edad Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Tue, 26 Jul 2022 14:38:06 +0300 Subject: [PATCH 24/27] Fix finalizer name --- pkg/controller/clusterpool/collections.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 871d496e237..cd1098ca87b 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -882,7 +882,7 @@ func (c *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { } func (c *cdcCollection) RemoveFinalizer(pool *hivev1.ClusterPool) { - poolFinalizer := fmt.Sprintf("hive.openshift.io/clusterpools/%s", pool.Name) + poolFinalizer := fmt.Sprintf("hive.openshift.io/%s", pool.Name) for _, item := range pool.Spec.Inventory { if cdc, ok := c.byCDCName[item.Name]; ok { @@ -903,7 +903,7 @@ func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c clien return nil } - poolFinalizer := fmt.Sprintf("hive.openshift.io/clusterpools/%s", pool.Name) + poolFinalizer := fmt.Sprintf("hive.openshift.io/%s", pool.Name) // Handle deletion of CDC in the namespace for _, cdc := range cdcs.namespace { From f7ef0bfb0db0d18793d9d3b8aef38d19c8981a8f Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Tue, 26 Jul 2022 17:37:02 +0300 Subject: [PATCH 25/27] Fix finalizer process --- .../clusterpool/clusterpool_controller.go | 17 ++++++++++------- pkg/controller/clusterpool/collections.go | 14 ++++++++++---- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 0657eee9e77..df1ad6d6c6e 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -961,6 +961,16 @@ func (r *ReconcileClusterPool) reconcileDeletedPool(pool *hivev1.ClusterPool, lo if !controllerutils.HasFinalizer(pool, finalizer) { return nil } + + cdcs, err := getAllCustomizationsForPool(r.Client, pool, logger) + if err != nil { + return err + } + + if err := cdcs.RemoveFinalizer(r.Client, pool); err != nil { + return err + } + // Don't care about the poolVersion here since we're deleting everything. cds, err := getAllClusterDeploymentsForPool(r.Client, pool, "", logger) if err != nil { @@ -976,13 +986,6 @@ func (r *ReconcileClusterPool) reconcileDeletedPool(pool *hivev1.ClusterPool, lo } } - cdcs, err := getAllCustomizationsForPool(r.Client, pool, logger) - if err != nil { - return err - } - - cdcs.RemoveFinalizer(pool) - // TODO: Wait to remove finalizer until all (unclaimed??) clusters are gone. 
controllerutils.DeleteFinalizer(pool, finalizer) if err := r.Update(context.Background(), pool); err != nil { diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index cd1098ca87b..ca55aef4ace 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -877,18 +877,24 @@ func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.Clus return nil } -func (c *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { - return c.unassigned +func (cdcs *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { + return cdcs.unassigned } -func (c *cdcCollection) RemoveFinalizer(pool *hivev1.ClusterPool) { +func (cdcs *cdcCollection) RemoveFinalizer(c client.Client, pool *hivev1.ClusterPool) error { poolFinalizer := fmt.Sprintf("hive.openshift.io/%s", pool.Name) for _, item := range pool.Spec.Inventory { - if cdc, ok := c.byCDCName[item.Name]; ok { + if cdc, ok := cdcs.namespace[item.Name]; ok { controllerutils.DeleteFinalizer(cdc, poolFinalizer) + if err := c.Update(context.Background(), cdc); err != nil { + return err + } + cdcs.namespace[item.Name] = cdc } } + + return nil } // SyncClusterDeploymentCustomizations updates CDCs and related CR status: From 5f0fd62312d6985fed6cf1c18a1b44cf2b0c9028 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Mon, 1 Aug 2022 17:25:02 +0300 Subject: [PATCH 26/27] Fix tests --- pkg/controller/clusterpool/clusterpool_controller_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index 94818479187..9ef0c325a71 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -3,6 +3,7 @@ package clusterpool import ( "context" "encoding/json" + "fmt" "regexp" "sort" "testing" @@ -230,7 +231,7 @@ func TestReconcileClusterPool(t *testing.T) { testNamespace, "test-cdc-1", scheme, ).GenericOptions( testgeneric.Deleted(), - testgeneric.WithFinalizer(finalizer), + testgeneric.WithFinalizer(fmt.Sprintf("hive.openshift.io/%s", testLeasePoolName)), ).Build(), }, expectedTotalClusters: 0, @@ -1770,7 +1771,7 @@ func TestReconcileClusterPool(t *testing.T) { } if test.expectedInventoryValidStatus != "" { - inventoryValidCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) + inventoryValidCondition := controllerutils.FindCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") { assert.Equal(t, test.expectedInventoryValidStatus, inventoryValidCondition.Status, "unexpected InventoryValid condition status %s", inventoryValidCondition.Message) } } if test.expectedInventoryMessage != nil { - inventoryValidCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) + inventoryValidCondition := controllerutils.FindCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") { expectedInventoryMessage := map[string][]string{} err := json.Unmarshal([]byte(inventoryValidCondition.Message), &expectedInventoryMessage) From
1786bbcc9c8827b757d9646753994b2b8c321343 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Mon, 1 Aug 2022 21:03:38 +0300 Subject: [PATCH 27/27] Fix verify --- apis/go.mod | 2 +- apis/go.sum | 4 +- .../conditions/v1/conditions.go | 46 +++++++++++-------- apis/vendor/modules.txt | 2 +- 4 files changed, 32 insertions(+), 22 deletions(-) diff --git a/apis/go.mod b/apis/go.mod index b7db25e0310..f9c3928b596 100644 --- a/apis/go.mod +++ b/apis/go.mod @@ -4,7 +4,7 @@ go 1.18 require ( github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 - github.com/openshift/custom-resource-status v1.1.2 + github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 k8s.io/api v0.24.1 k8s.io/apimachinery v0.24.1 ) diff --git a/apis/go.sum b/apis/go.sum index 6d6c0514b76..7cd56d0b7ed 100644 --- a/apis/go.sum +++ b/apis/go.sum @@ -125,8 +125,8 @@ github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5h github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 h1:bkBOsI/Yd+cBT+/aXkbbNo+imvq4VKRusoCluIGOBBg= github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= -github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= +github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= +github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go index bbeee804a2b..7f98c60a063 100644 --- a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go @@ -8,7 +8,8 @@ import ( ) // SetStatusCondition sets the corresponding condition in conditions to newCondition. -func SetStatusCondition(conditions *[]Condition, newCondition Condition) { +// The return value indicates if this resulted in any changes *other than* LastHeartbeatTime. 
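+// A hedged usage sketch (caller code assumed, not part of this package):
+//
+//	if SetStatusCondition(&obj.Status.Conditions, cond) {
+//		// persist only when something besides the heartbeat changed
+//		err = cl.Status().Update(ctx, obj)
+//	}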
+func SetStatusCondition(conditions *[]Condition, newCondition Condition) bool { if conditions == nil { conditions = &[]Condition{} } @@ -17,22 +18,18 @@ func SetStatusCondition(conditions *[]Condition, newCondition Condition) { newCondition.LastTransitionTime = metav1.NewTime(time.Now()) newCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) *conditions = append(*conditions, newCondition) - return - } - - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + return true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message + changed := updateCondition(existingCondition, newCondition) existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) + return changed } // SetStatusConditionNoHearbeat sets the corresponding condition in conditions to newCondition // without setting lastHeartbeatTime. -func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) { +// The return value indicates if this resulted in any changes. +func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) bool { if conditions == nil { conditions = &[]Condition{} } @@ -40,16 +37,10 @@ func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Conditi if existingCondition == nil { newCondition.LastTransitionTime = metav1.NewTime(time.Now()) *conditions = append(*conditions, newCondition) - return + return true } - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } - - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message + return updateCondition(existingCondition, newCondition) } // RemoveStatusCondition removes the corresponding conditionType from conditions. @@ -67,6 +58,25 @@ func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) *conditions = newConditions } +func updateCondition(existingCondition *Condition, newCondition Condition) bool { + changed := false + if existingCondition.Status != newCondition.Status { + changed = true + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + if existingCondition.Reason != newCondition.Reason { + changed = true + existingCondition.Reason = newCondition.Reason + } + if existingCondition.Message != newCondition.Message { + changed = true + existingCondition.Message = newCondition.Message + } + return changed +} + // FindStatusCondition finds the conditionType in conditions. func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { for i := range conditions { diff --git a/apis/vendor/modules.txt b/apis/vendor/modules.txt index 4e924cf9b40..ef96cc2d404 100644 --- a/apis/vendor/modules.txt +++ b/apis/vendor/modules.txt @@ -21,7 +21,7 @@ github.com/modern-go/reflect2 # github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 ## explicit; go 1.16 github.com/openshift/api/config/v1 -# github.com/openshift/custom-resource-status v1.1.2 +# github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 ## explicit; go 1.12 github.com/openshift/custom-resource-status/conditions/v1 # golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
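For reference, a minimal sketch of how the series uses the boolean returned by the patched `SetStatusConditionNoHeartbeat` as a reservation guard in `Reserve`. The function name `reserveCDC` and the Reason/Message strings are illustrative assumptions, not the exact Hive code:

```go
package clusterpool

import (
	"context"
	"errors"

	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
	hivev1 "github.com/openshift/hive/apis/hive/v1"
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reserveCDC flips the CDC's Available condition to False; because the patched
// SetStatusConditionNoHeartbeat reports whether anything actually changed, an
// unchanged condition means the CDC was already reserved and Reserve must fail.
func reserveCDC(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error {
	changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{
		Type:    conditionsv1.ConditionAvailable,
		Status:  corev1.ConditionFalse,
		Reason:  "Reserved",                   // illustrative reason
		Message: "reserved by a cluster pool", // illustrative message
	})
	if !changed {
		return errors.New("ClusterDeploymentCustomization already reserved")
	}
	// persist the reservation before anything else relies on it
	return c.Status().Update(context.Background(), cdc)
}
```

The same compare-and-set shape keeps two pools from reserving one CDC: whichever reconcile loses the race sees `changed == false` and backs off instead of overwriting the winner's status update.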