From 18ac472a411d3ec1c32246bc0a4b44324f65aed1 Mon Sep 17 00:00:00 2001 From: Alexander Braverman Masis Date: Sun, 2 Jan 2022 10:10:38 +0200 Subject: [PATCH] ClusterPool Inventory --- apis/hive/v1/clusterdeployment_types.go | 6 + .../clusterdeploymentcustomization_types.go | 90 +++++ apis/hive/v1/clusterpool_types.go | 48 +++ apis/hive/v1/zz_generated.deepcopy.go | 170 +++++++++ cmd/hiveadmission/main.go | 1 + ...ft.io_clusterdeploymentcustomizations.yaml | 115 ++++++ .../hive.openshift.io_clusterdeployments.yaml | 10 + .../crds/hive.openshift.io_clusterpools.yaml | 62 ++++ hack/app-sre/kustomization.yaml | 1 + hack/app-sre/saas-template.yaml | 192 ++++++++++ .../hive/v1/clusterdeploymentcustomization.go | 162 ++++++++ .../fake_clusterdeploymentcustomization.go | 114 ++++++ .../typed/hive/v1/fake/fake_hive_client.go | 4 + .../typed/hive/v1/generated_expansion.go | 2 + .../versioned/typed/hive/v1/hive_client.go | 5 + .../informers/externalversions/generic.go | 2 + .../hive/v1/clusterdeploymentcustomization.go | 74 ++++ .../externalversions/hive/v1/interface.go | 7 + .../hive/v1/clusterdeploymentcustomization.go | 83 +++++ .../listers/hive/v1/expansion_generated.go | 8 + pkg/clusterresource/openstack.go | 22 +- .../clusterdeployment_controller.go | 75 +++- .../clusterdeployment/clusterprovisions.go | 1 - .../clusterpool/clusterpool_controller.go | 346 +++++++++++++++++- .../clusterpool_controller_test.go | 108 +++++- pkg/controller/clusterpool/collections.go | 13 + pkg/controller/utils/conditions.go | 52 +++ .../clusterdeployment/clusterdeployment.go | 7 + .../clusterdeploymentcustomization.go | 73 ++++ pkg/test/clusterpool/clusterpool.go | 6 + ...customization_validating_admission_hook.go | 285 +++++++++++++++ .../clusterpool_validating_admission_hook.go | 16 + .../apis/hive/v1/clusterdeployment_types.go | 6 + .../clusterdeploymentcustomization_types.go | 90 +++++ .../hive/apis/hive/v1/clusterpool_types.go | 48 +++ .../apis/hive/v1/zz_generated.deepcopy.go | 170 
+++++++++ 36 files changed, 2463 insertions(+), 11 deletions(-) create mode 100644 apis/hive/v1/clusterdeploymentcustomization_types.go create mode 100644 config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml create mode 100644 pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go create mode 100644 pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go create mode 100644 pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go create mode 100644 pkg/client/listers/hive/v1/clusterdeploymentcustomization.go create mode 100644 pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go create mode 100644 pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go create mode 100644 vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go index 3fe46701981..948e15fa4d0 100644 --- a/apis/hive/v1/clusterdeployment_types.go +++ b/apis/hive/v1/clusterdeployment_types.go @@ -50,6 +50,10 @@ const ( // FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster // secret before cleaning up the API object. FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster" + + // FinalizerCustomizationRelease is used on ClusterDeployments to ensure we run a successful + // release of ClusterDeploymentCustomization + FinalizerCustomizationRelease string = "hive.openshift.io/customization" ) // ClusterPowerState is used to indicate whether a cluster is running or in a @@ -271,6 +275,8 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. 
ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` + // ClusterDeploymentCustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment + ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization"` } // ClusterMetadata contains metadata information about the installed cluster. diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go new file mode 100644 index 00000000000..ec01b6cffad --- /dev/null +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -0,0 +1,90 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API +// +kubebuilder:subresource:status +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Namespaced +type ClusterDeploymentCustomization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterDeploymentCustomizationSpec `json:"spec"` + Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"` +} + +// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationSpec struct { + // TODO: documentation + InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` +} + +// TODO: documentation +type PatchEntity struct { + // +required + Op string `json:"op"` + // +required + Path string `json:"path"` + // +required + Value string `json:"value"` +} + +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationStatus struct { + // TODO: documentation + // +optional + ClusterDeploymentRef *corev1.LocalObjectReference 
`json:"clusterDeploymentRef,omitempty"` + + // Conditions includes more detailed status for the cluster deployment customization status. + // +optional + Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"` +} + +type ClusterDeploymentCustomizationCondition struct { + // Type is the type of the condition. + Type ClusterDeploymentCustomizationConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type +type ClusterDeploymentCustomizationConditionType string + +const ( + // TODO: add more types + // TODO: shorter name? 
+ ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomizationList contains the list of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ClusterDeploymentCustomization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{}) +} diff --git a/apis/hive/v1/clusterpool_types.go b/apis/hive/v1/clusterpool_types.go index 06bde7e10f8..79caaa3beef 100644 --- a/apis/hive/v1/clusterpool_types.go +++ b/apis/hive/v1/clusterpool_types.go @@ -92,6 +92,18 @@ type ClusterPoolSpec struct { // HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool. // +optional HibernationConfig *HibernationConfig `json:"hibernationConfig"` + + // Inventory maintains a list of entries consumed by the clusterpool + // to customize the default cluster deployment + // +optional + Inventory []InventoryEntry `json:"inventory,omitempty"` + + // InventoryAttempts is the number of attempts to provision a ClusterDeployment with a given inventory entry. + // On a successful provision, the inventory entry attempts status is updated to this value. + // Negative InventoryAttempts means unlimited attempts, and is recommended only for debugging purposes. + // Default number of InventoryAttempts is 5. 
+ // +optional + InventoryAttempts *int32 `json:"inventoryAttempts,omitempty"` } type HibernationConfig struct { @@ -110,6 +122,22 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } +// InventoryEntryKind is the Kind of the inventory entry +// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization +type InventoryEntryKind string + +const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization" + +// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment +type InventoryEntry struct { + // Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value. + // +optional + Kind InventoryEntryKind `json:"kind,omitempty"` + // Name is the name of the referenced resource. + // +required + Name string `json:"name,omitempty"` +} + // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool. type ClusterPoolClaimLifetime struct { // Default is the default lifetime of the claim when no lifetime is set on the claim itself. @@ -152,6 +180,10 @@ type ClusterPoolStatus struct { // Conditions includes more detailed status for the cluster pool // +optional Conditions []ClusterPoolCondition `json:"conditions,omitempty"` + + // Inventory tracks the individual status of inventory entries + // +optional + Inventory map[string]InventoryEntryStatus `json:"inventory,omitempty"` } // ClusterPoolCondition contains details for the current condition of a cluster pool @@ -187,8 +219,24 @@ const ( // ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready) // ClusterDeployments in the pool match the current configuration of the ClusterPool. 
ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent" + // ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid + ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" ) +// InventoryEntryStatus contains current details for the related Inventory entry +type InventoryEntryStatus struct { + // Inventory Entry Kind + Kind string `json:"kind"` + // Version of resource instance + Version string `json:"version,omitempty"` + // Status of inventory resource for the cluster pool + Status string `json:"status"` + // ClusterDeployment is a reference to ClusterDeployment using the Inventory resource + ClusterDeployment *corev1.LocalObjectReference `json:"clusterDeployment,omitempty"` + // Attempts left to try using inventory entry + Attempts int32 `json:"attempts,omitempty"` +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go index dfdd9fbed1b..5b1cebdf821 100644 --- a/apis/hive/v1/zz_generated.deepcopy.go +++ b/apis/hive/v1/zz_generated.deepcopy.go @@ -656,6 +656,134 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. 
+func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. +func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. 
+func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. 
+func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1316,6 +1444,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.ClusterDeploymentCustomizationRef != nil { + in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1388,6 +1521,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(HibernationConfig) **out = **in } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2470,6 +2608,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. +func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2954,6 +3108,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. +func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in diff --git a/cmd/hiveadmission/main.go b/cmd/hiveadmission/main.go index 3c8c243f23f..45fe777814c 100644 --- a/cmd/hiveadmission/main.go +++ b/cmd/hiveadmission/main.go @@ -30,6 +30,7 @@ func main() { hivevalidatingwebhooks.NewMachinePoolValidatingAdmissionHook(decoder), hivevalidatingwebhooks.NewSyncSetValidatingAdmissionHook(decoder), hivevalidatingwebhooks.NewSelectorSyncSetValidatingAdmissionHook(decoder), + hivevalidatingwebhooks.NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder), ) } diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml new file mode 100644 index 00000000000..bc679c087bb --- /dev/null +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -0,0 +1,115 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io +spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: 
ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired state + of ClusterDeploymentCustomization + properties: + installConfigPatches: + description: 'TODO: documentation' + items: + description: 'TODO: documentation' + properties: + op: + type: string + path: + type: string + value: + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization + properties: + clusterDeploymentRef: + description: 'TODO: documentation' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + conditions: + description: Conditions includes more detailed status for the cluster + deployment customization status. 
+ items: + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating + details about last transition. + type: string + reason: + description: Reason is a unique, one-word, CamelCase reason + for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index 68561bdf007..3c426488039 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -187,6 +187,15 @@ spec: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: ClusterDeploymentCustomizationRef is the ClusterPool + Inventory claimed customization for this ClusterDeployment + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. @@ -196,6 +205,7 @@ spec: the cluster was created. 
type: string required: + - clusterDeploymentCustomization - namespace - poolName type: object diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml index 4c406208f4a..5d5fd366654 100644 --- a/config/crds/hive.openshift.io_clusterpools.yaml +++ b/config/crds/hive.openshift.io_clusterpools.yaml @@ -152,6 +152,35 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + inventory: + description: Inventory maintains a list entries consumed by the clusterpool + to customize the default the cluster deployment + items: + description: InventoryEntry maintains a reference to a custom resource + consumed by a clusterpool to customize the cluster deployment + properties: + kind: + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is also + currently the only supported value. + enum: + - "" + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. + type: string + type: object + type: array + inventoryAttempts: + description: InventoryAttempts is the number of attempts to provision + a ClusterDeployment with a given inventory entry. On a successful + provision, the inventory entry attempts status is updated to this + value. Negative InventoryAttempts means unlimited attempts, and + recommended only for debugging purposes. Default number of InventoryAttempts + is 5. 
+ format: int32 + type: integer labels: additionalProperties: type: string @@ -601,6 +630,39 @@ spec: - type type: object type: array + inventory: + additionalProperties: + description: InventoryEntryStatus contains current details for the + related Inventory entry + properties: + attempts: + description: Attempts left to try using inventory entry + format: int32 + type: integer + clusterDeployment: + description: ClusterDeployment is a reference to ClusterDeployment + using the Inventory resource + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + kind: + description: Inventory Entry Kind + type: string + status: + description: Status of inventory resource for the cluster pool + type: string + version: + description: Version of resource instance + type: string + required: + - kind + - status + type: object + description: Inventory tracks the individual status of inventory entries + type: object ready: description: Ready is the number of unclaimed clusters that are installed and are running and ready to be claimed. 
diff --git a/hack/app-sre/kustomization.yaml b/hack/app-sre/kustomization.yaml index 5941b588da1..9c3ff4d672f 100644 --- a/hack/app-sre/kustomization.yaml +++ b/hack/app-sre/kustomization.yaml @@ -29,6 +29,7 @@ resources: - ../../config/crds/hive.openshift.io_selectorsyncsets.yaml - ../../config/crds/hive.openshift.io_syncidentityproviders.yaml - ../../config/crds/hive.openshift.io_syncsets.yaml +- ../../config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization # Use app-sre-supplied variables to pull the image for the current commit diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index 7d8ec0f2f39..4de7e6f6240 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -251,6 +251,123 @@ objects: plural: '' conditions: [] storedVersions: [] +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io + spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. 
Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired + state of ClusterDeploymentCustomization + properties: + installConfigPatches: + description: 'TODO: documentation' + items: + description: 'TODO: documentation' + properties: + op: + type: string + path: + type: string + value: + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization + properties: + clusterDeploymentRef: + description: 'TODO: documentation' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + conditions: + description: Conditions includes more detailed status for the cluster + deployment customization status. + items: + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the + condition. + format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating + details about last transition. + type: string + reason: + description: Reason is a unique, one-word, CamelCase reason + for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. 
+ type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -442,6 +559,15 @@ objects: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: ClusterDeploymentCustomizationRef is the ClusterPool + Inventory claimed customization for this ClusterDeployment + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. @@ -451,6 +577,7 @@ objects: the cluster was created. type: string required: + - clusterDeploymentCustomization - namespace - poolName type: object @@ -1912,6 +2039,36 @@ objects: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + inventory: + description: Inventory maintains a list entries consumed by the + clusterpool to customize the default the cluster deployment + items: + description: InventoryEntry maintains a reference to a custom + resource consumed by a clusterpool to customize the cluster + deployment + properties: + kind: + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is + also currently the only supported value. + enum: + - '' + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. + type: string + type: object + type: array + inventoryAttempts: + description: InventoryAttempts is the number of attempts to provision + a ClusterDeployment with a given inventory entry. 
On a successful + provision, the inventory entry attempts status is updated to this + value. Negative InventoryAttempts means unlimited attempts, and + recommended only for debugging purposes. Default number of InventoryAttempts + is 5. + format: int32 + type: integer labels: additionalProperties: type: string @@ -2369,6 +2526,41 @@ objects: - type type: object type: array + inventory: + additionalProperties: + description: InventoryEntryStatus contains current details for + the related Inventory entry + properties: + attempts: + description: Attempts left to try using inventory entry + format: int32 + type: integer + clusterDeployment: + description: ClusterDeployment is a reference to ClusterDeployment + using the Inventory resource + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + kind: + description: Inventory Entry Kind + type: string + status: + description: Status of inventory resource for the cluster + pool + type: string + version: + description: Version of resource instance + type: string + required: + - kind + - status + type: object + description: Inventory tracks the individual status of inventory + entries + type: object ready: description: Ready is the number of unclaimed clusters that are installed and are running and ready to be claimed. diff --git a/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..dd7ce990a81 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/hive/apis/hive/v1" + scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterDeploymentCustomizationsGetter has a method to return a ClusterDeploymentCustomizationInterface. +// A group's client should implement this interface. +type ClusterDeploymentCustomizationsGetter interface { + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface +} + +// ClusterDeploymentCustomizationInterface has methods to work with ClusterDeploymentCustomization resources. +type ClusterDeploymentCustomizationInterface interface { + Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (*v1.ClusterDeploymentCustomization, error) + Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (*v1.ClusterDeploymentCustomization, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterDeploymentCustomization, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterDeploymentCustomizationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) + ClusterDeploymentCustomizationExpansion +} + +// clusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type clusterDeploymentCustomizations struct { + client 
rest.Interface + ns string +} + +// newClusterDeploymentCustomizations returns a ClusterDeploymentCustomizations +func newClusterDeploymentCustomizations(c *HiveV1Client, namespace string) *clusterDeploymentCustomizations { + return &clusterDeploymentCustomizations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. +func (c *clusterDeploymentCustomizations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *clusterDeploymentCustomizations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterDeploymentCustomizationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterDeploymentCustomizationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *clusterDeploymentCustomizations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). 
+ Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *clusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Post(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *clusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(clusterDeploymentCustomization.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *clusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *clusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. +func (c *clusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go new file mode 100644 index 00000000000..e775114dae9 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go @@ -0,0 +1,114 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type FakeClusterDeploymentCustomizations struct { + Fake *FakeHiveV1 + ns string +} + +var clusterdeploymentcustomizationsResource = schema.GroupVersionResource{Group: "hive.openshift.io", Version: "v1", Resource: "clusterdeploymentcustomizations"} + +var clusterdeploymentcustomizationsKind = schema.GroupVersionKind{Group: "hive.openshift.io", Version: "v1", Kind: "ClusterDeploymentCustomization"} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. +func (c *FakeClusterDeploymentCustomizations) Get(ctx context.Context, name string, options v1.GetOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(clusterdeploymentcustomizationsResource, c.ns, name), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *FakeClusterDeploymentCustomizations) List(ctx context.Context, opts v1.ListOptions) (result *hivev1.ClusterDeploymentCustomizationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(clusterdeploymentcustomizationsResource, clusterdeploymentcustomizationsKind, c.ns, opts), &hivev1.ClusterDeploymentCustomizationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &hivev1.ClusterDeploymentCustomizationList{ListMeta: obj.(*hivev1.ClusterDeploymentCustomizationList).ListMeta} + for _, item := range obj.(*hivev1.ClusterDeploymentCustomizationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *FakeClusterDeploymentCustomizations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(clusterdeploymentcustomizationsResource, c.ns, opts)) + +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *FakeClusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.CreateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. 
+func (c *FakeClusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.UpdateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *FakeClusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(clusterdeploymentcustomizationsResource, c.ns, name, opts), &hivev1.ClusterDeploymentCustomization{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(clusterdeploymentcustomizationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &hivev1.ClusterDeploymentCustomizationList{}) + return err +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. +func (c *FakeClusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(clusterdeploymentcustomizationsResource, c.ns, name, pt, data, subresources...), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go index d52a518c108..06f12520fe7 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go @@ -24,6 +24,10 @@ func (c *FakeHiveV1) ClusterDeployments(namespace string) v1.ClusterDeploymentIn return &FakeClusterDeployments{c, namespace} } +func (c *FakeHiveV1) ClusterDeploymentCustomizations(namespace string) v1.ClusterDeploymentCustomizationInterface { + return &FakeClusterDeploymentCustomizations{c, namespace} +} + func (c *FakeHiveV1) ClusterDeprovisions(namespace string) v1.ClusterDeprovisionInterface { return &FakeClusterDeprovisions{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go index 600401a271d..951ab87652b 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go @@ -8,6 +8,8 @@ type ClusterClaimExpansion interface{} type ClusterDeploymentExpansion interface{} +type ClusterDeploymentCustomizationExpansion interface{} + type ClusterDeprovisionExpansion interface{} type ClusterImageSetExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go index 0652d984747..e0ea9fe9f1a 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go @@ -15,6 +15,7 @@ type 
HiveV1Interface interface { CheckpointsGetter ClusterClaimsGetter ClusterDeploymentsGetter + ClusterDeploymentCustomizationsGetter ClusterDeprovisionsGetter ClusterImageSetsGetter ClusterPoolsGetter @@ -48,6 +49,10 @@ func (c *HiveV1Client) ClusterDeployments(namespace string) ClusterDeploymentInt return newClusterDeployments(c, namespace) } +func (c *HiveV1Client) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface { + return newClusterDeploymentCustomizations(c, namespace) +} + func (c *HiveV1Client) ClusterDeprovisions(namespace string) ClusterDeprovisionInterface { return newClusterDeprovisions(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 71f7068525e..296f8270454 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -44,6 +44,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterClaims().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeployments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeployments().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clusterdeploymentcustomizations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeploymentCustomizations().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeprovisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeprovisions().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterimagesets"): diff --git a/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 
00000000000..637b3b707f7 --- /dev/null +++ b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + versioned "github.com/openshift/hive/pkg/client/clientset/versioned" + internalinterfaces "github.com/openshift/hive/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/hive/pkg/client/listers/hive/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationInformer provides access to a shared informer and lister for +// ClusterDeploymentCustomizations. +type ClusterDeploymentCustomizationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterDeploymentCustomizationLister +} + +type clusterDeploymentCustomizationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).Watch(context.TODO(), options) + }, + }, + &hivev1.ClusterDeploymentCustomization{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterDeploymentCustomizationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterDeploymentCustomizationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&hivev1.ClusterDeploymentCustomization{}, f.defaultInformer) +} + +func (f *clusterDeploymentCustomizationInformer) Lister() v1.ClusterDeploymentCustomizationLister { + return v1.NewClusterDeploymentCustomizationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/hive/v1/interface.go b/pkg/client/informers/externalversions/hive/v1/interface.go index d73a5da9ab5..0659201eae6 100644 --- a/pkg/client/informers/externalversions/hive/v1/interface.go +++ b/pkg/client/informers/externalversions/hive/v1/interface.go @@ -14,6 +14,8 @@ type Interface interface { 
ClusterClaims() ClusterClaimInformer // ClusterDeployments returns a ClusterDeploymentInformer. ClusterDeployments() ClusterDeploymentInformer + // ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. + ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer // ClusterDeprovisions returns a ClusterDeprovisionInformer. ClusterDeprovisions() ClusterDeprovisionInformer // ClusterImageSets returns a ClusterImageSetInformer. @@ -70,6 +72,11 @@ func (v *version) ClusterDeployments() ClusterDeploymentInformer { return &clusterDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. +func (v *version) ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer { + return &clusterDeploymentCustomizationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ClusterDeprovisions returns a ClusterDeprovisionInformer. func (v *version) ClusterDeprovisions() ClusterDeprovisionInformer { return &clusterDeprovisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..dc1ff4923ad --- /dev/null +++ b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/hive/apis/hive/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationLister helps list ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. 
+type ClusterDeploymentCustomizationLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister + ClusterDeploymentCustomizationListerExpansion +} + +// clusterDeploymentCustomizationLister implements the ClusterDeploymentCustomizationLister interface. +type clusterDeploymentCustomizationLister struct { + indexer cache.Indexer +} + +// NewClusterDeploymentCustomizationLister returns a new ClusterDeploymentCustomizationLister. +func NewClusterDeploymentCustomizationLister(indexer cache.Indexer) ClusterDeploymentCustomizationLister { + return &clusterDeploymentCustomizationLister{indexer: indexer} +} + +// List lists all ClusterDeploymentCustomizations in the indexer. +func (s *clusterDeploymentCustomizationLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. +func (s *clusterDeploymentCustomizationLister) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister { + return clusterDeploymentCustomizationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ClusterDeploymentCustomizationNamespaceLister helps list and get ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. 
+type ClusterDeploymentCustomizationNamespaceLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ClusterDeploymentCustomization, error) + ClusterDeploymentCustomizationNamespaceListerExpansion +} + +// clusterDeploymentCustomizationNamespaceLister implements the ClusterDeploymentCustomizationNamespaceLister +// interface. +type clusterDeploymentCustomizationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. +func (s clusterDeploymentCustomizationNamespaceLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. 
+func (s clusterDeploymentCustomizationNamespaceLister) Get(name string) (*v1.ClusterDeploymentCustomization, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterdeploymentcustomization"), name) + } + return obj.(*v1.ClusterDeploymentCustomization), nil +} diff --git a/pkg/client/listers/hive/v1/expansion_generated.go b/pkg/client/listers/hive/v1/expansion_generated.go index 2f913b5fa55..b07f9b98377 100644 --- a/pkg/client/listers/hive/v1/expansion_generated.go +++ b/pkg/client/listers/hive/v1/expansion_generated.go @@ -26,6 +26,14 @@ type ClusterDeploymentListerExpansion interface{} // ClusterDeploymentNamespaceLister. type ClusterDeploymentNamespaceListerExpansion interface{} +// ClusterDeploymentCustomizationListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationLister. +type ClusterDeploymentCustomizationListerExpansion interface{} + +// ClusterDeploymentCustomizationNamespaceListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationNamespaceLister. +type ClusterDeploymentCustomizationNamespaceListerExpansion interface{} + // ClusterDeprovisionListerExpansion allows custom methods to be added to // ClusterDeprovisionLister. type ClusterDeprovisionListerExpansion interface{} diff --git a/pkg/clusterresource/openstack.go b/pkg/clusterresource/openstack.go index ee6be5fa338..13afe6490e9 100644 --- a/pkg/clusterresource/openstack.go +++ b/pkg/clusterresource/openstack.go @@ -15,6 +15,12 @@ import ( "github.com/openshift/hive/pkg/constants" ) +const ( + computeFlavor = "m1.large" + masterFlavor = "c1.m4.xlarge" + externalNetwork = "provider_net_shared_3" +) + var _ CloudBuilder = (*OpenStackCloudBuilder)(nil) // OpenStackCloudBuilder encapsulates cluster artifact generation logic specific to OpenStack. 
@@ -43,6 +49,13 @@ type OpenStackCloudBuilder struct { MasterFlavor string } +func NewOpenStackCloudBuilderFromSecret(credsSecret *corev1.Secret) *OpenStackCloudBuilder { + cloudsYamlContent := credsSecret.Data[constants.OpenStackCredentialsName] + return &OpenStackCloudBuilder{ + CloudsYAMLContent: cloudsYamlContent, + } +} + func (p *OpenStackCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ @@ -80,19 +93,18 @@ func (p *OpenStackCloudBuilder) addMachinePoolPlatform(o *Builder, mp *hivev1.Ma func (p *OpenStackCloudBuilder) addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig) { ic.Platform = installertypes.Platform{ OpenStack: &installeropenstack.Platform{ - Cloud: p.Cloud, - ExternalNetwork: p.ExternalNetwork, - DeprecatedFlavorName: p.ComputeFlavor, + ExternalNetwork: externalNetwork, + DeprecatedFlavorName: computeFlavor, APIFloatingIP: p.APIFloatingIP, IngressFloatingIP: p.IngressFloatingIP, }, } ic.Compute[0].Platform.OpenStack = &installeropenstack.MachinePool{ - FlavorName: p.ComputeFlavor, + FlavorName: computeFlavor, } ic.ControlPlane.Platform.OpenStack = &installeropenstack.MachinePool{ - FlavorName: p.MasterFlavor, + FlavorName: masterFlavor, } } diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 3b9f23a0099..a6e8fdc1281 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -562,13 +562,21 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) { cdLog.Debugf("adding clusterdeployment finalizer") if err := r.addClusterDeploymentFinalizer(cd); err != nil { - cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer") + 
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding deprovision finalizer") return reconcile.Result{}, err } metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc() return reconcile.Result{}, nil } + if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { + cdLog.Debugf("adding customization finalizer") + if err := r.addClusterDeploymentCustomizationFinalizer(cd); err != nil { + cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding customization finalizer") + return reconcile.Result{}, err + } + } + if cd.Spec.ManageDNS { dnsZone, err := r.ensureManagedDNSZone(cd, cdLog) if err != nil { @@ -1159,6 +1167,19 @@ func (r *ReconcileClusterDeployment) setClusterStatusURLs(cd *hivev1.ClusterDepl return reconcile.Result{}, nil } +func (r *ReconcileClusterDeployment) ensureClusterDeploymentCustomizationIsReleased(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (gone bool, returnErr error) { + if cd.Spec.ClusterPoolRef == nil || cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef == nil { + return true, nil + } + + if err := r.releaseClusterDeploymentCustomization(cd, cdLog); err != nil { + cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error releasing inventory customization") + return false, err + } + + return true, nil +} + // ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone // linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted. 
// Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it @@ -1358,13 +1379,20 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu return reconcile.Result{}, err } + customizationReleased, err := r.ensureClusterDeploymentCustomizationIsReleased(cd, cdLog) + if err != nil { + return reconcile.Result{}, err + } + switch { case !deprovisioned: return reconcile.Result{}, nil case !dnsZoneGone: return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil + case !customizationReleased: + return reconcile.Result{}, nil default: - cdLog.Infof("DNSZone gone and deprovision request completed, removing finalizer") + cdLog.Infof("DNSZone gone, customization released and deprovision request completed, removing finalizer") if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer") return reconcile.Result{}, err @@ -1379,6 +1407,12 @@ func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.Cl return r.Update(context.TODO(), cd) } +func (r *ReconcileClusterDeployment) addClusterDeploymentCustomizationFinalizer(cd *hivev1.ClusterDeployment) error { + cd = cd.DeepCopy() + controllerutils.AddFinalizer(cd, hivev1.FinalizerCustomizationRelease) + return r.Update(context.TODO(), cd) +} + func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { cd = cd.DeepCopy() @@ -1395,6 +1429,43 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1 return nil } +func (r *ReconcileClusterDeployment) releaseClusterDeploymentCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { + customizationRef := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef + namespace := types.NamespacedName{Namespace: cd.Namespace, Name: customizationRef.Name} + cdc := &hivev1.ClusterDeploymentCustomization{} + 
err := r.Get(context.TODO(), namespace, cdc) + if err != nil { + if apierrors.IsNotFound(err) { + cdLog.Infof("customization not found: %s/%s, nothing to release", cd.Namespace, customizationRef.Name) + controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) + return nil + } + log.WithError(err).Error("error reading customization") + return err + } + + cdc.Status.ClusterDeploymentRef = nil + cdc.Status.Conditions = controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + corev1.ConditionTrue, + "ClusterDeploymentCustomizationAvailable", + "Cluster Deployment Customization is available", + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + + if err := r.Update(context.TODO(), cdc); err != nil { + return err + } + + controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) + if err := r.Update(context.TODO(), cd); err != nil { + return err + } + + return nil +} + // setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation // to when the dnszone became ready, and set a metric to report the delay. // Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered. 
diff --git a/pkg/controller/clusterdeployment/clusterprovisions.go b/pkg/controller/clusterdeployment/clusterprovisions.go index 7b2194cc25c..48147c40011 100644 --- a/pkg/controller/clusterdeployment/clusterprovisions.go +++ b/pkg/controller/clusterdeployment/clusterprovisions.go @@ -572,7 +572,6 @@ func readProvisionFailedConfig() (*hivev1.FailedProvisionConfig, error) { if len(path) == 0 { return nil, nil } - config := &hivev1.FailedProvisionConfig{} fileBytes, err := readFile(path) diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index a7076201178..c725424a64b 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -2,16 +2,19 @@ package clusterpool import ( "context" + "encoding/json" "fmt" "math" "reflect" "sort" + "github.com/ghodss/yaml" "github.com/pkg/errors" log "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "github.com/davegardnerisme/deephash" + jsonpatch "github.com/evanphx/json-patch" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,6 +49,7 @@ const ( icSecretDependent = "install config template secret" cdClusterPoolIndex = "spec.clusterpool.namespacedname" claimClusterPoolIndex = "spec.clusterpoolname" + defaultInventoryAttempts = 5 ) var ( @@ -54,6 +58,7 @@ var ( hivev1.ClusterPoolMissingDependenciesCondition, hivev1.ClusterPoolCapacityAvailableCondition, hivev1.ClusterPoolAllClustersCurrentCondition, + hivev1.ClusterPoolInventoryValidCondition, } ) @@ -263,6 +268,11 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, nil } + // Initialize and updated cluster pool inventory + if err := r.updateInventory(clp, logger); err != nil { + return reconcile.Result{}, err + } + // If the pool is deleted, clear finalizer once all ClusterDeployments have been deleted. 
if clp.DeletionTimestamp != nil { return reconcile.Result{}, r.reconcileDeletedPool(clp, logger) @@ -480,6 +490,12 @@ func calculatePoolVersion(clp *hivev1.ClusterPool) string { ba = append(ba, deephash.Hash(clp.Spec.BaseDomain)...) ba = append(ba, deephash.Hash(clp.Spec.ImageSetRef)...) ba = append(ba, deephash.Hash(clp.Spec.InstallConfigSecretTemplateRef)...) + // Inventory changes the behavior of cluster pool, thus it needs to be in the pool version. + // But to avoid redployment of clusters if inventory changes, a fixed string is added to pool version. + // https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#pool-version + if clp.Spec.Inventory != nil { + ba = append(ba, []byte("hasInventory")...) + } // Hash of hashes to ensure fixed length return fmt.Sprintf("%x", deephash.Hash(ba)) } @@ -667,6 +683,13 @@ func (r *ReconcileClusterPool) createCluster( } annotations[constants.ClusterDeploymentPoolSpecHashAnnotation] = poolVersion + cdc := &hivev1.ClusterDeploymentCustomization{} + if clp.Spec.Inventory != nil { + if cdc, err = r.getInventoryCustomization(clp, logger); err != nil { + return nil, err + } + } + // We will use this unique random namespace name for our cluster name. builder := &clusterresource.Builder{ Name: ns.Name, @@ -696,6 +719,7 @@ func (r *ReconcileClusterPool) createCluster( poolKey := types.NamespacedName{Namespace: clp.Namespace, Name: clp.Name}.String() r.expectations.ExpectCreations(poolKey, 1) var cd *hivev1.ClusterDeployment + var ics *corev1.Secret // Add the ClusterPoolRef to the ClusterDeployment, and move it to the end of the slice. 
for i, obj := range objs { var ok bool @@ -704,10 +728,30 @@ func (r *ReconcileClusterPool) createCluster( continue } poolRef := poolReference(clp) + if clp.Spec.Inventory != nil { + poolRef.ClusterDeploymentCustomizationRef = &corev1.LocalObjectReference{Name: cdc.Name} + } cd.Spec.ClusterPoolRef = &poolRef lastIndex := len(objs) - 1 objs[i], objs[lastIndex] = objs[lastIndex], objs[i] } + // Apply inventory customization + if clp.Spec.Inventory != nil { + for _, obj := range objs { + if !isInstallConfigSecret(obj) { + continue + } + ics = obj.(*corev1.Secret) + installConfig, err := applyPatches(cdc.Spec.InstallConfigPatches, ics.StringData["install-config.yaml"], logger) + if err != nil { + r.updateInventoryEntryStatus(clp, cdc.Name, "Broken by configuration", logger) + return nil, err + } + + ics.StringData["install-config.yaml"] = installConfig + } + } + // Create the resources. for _, obj := range objs { if err := r.Client.Create(context.Background(), obj.(client.Object)); err != nil { @@ -914,6 +958,35 @@ func (r *ReconcileClusterPool) setAvailableCapacityCondition(pool *hivev1.Cluste return nil } +func (r *ReconcileClusterPool) setInventoryValidCondition(pool *hivev1.ClusterPool, valid bool, msg string, logger log.FieldLogger) error { + status := corev1.ConditionTrue + reason := "InventoryValid" + message := "Inventory is Valid" + updateConditionCheck := controllerutils.UpdateConditionNever + if !valid { + status = corev1.ConditionFalse + reason = "Invalid" + message = msg + updateConditionCheck = controllerutils.UpdateConditionIfReasonOrMessageChange + } + conds, changed := controllerutils.SetClusterPoolConditionWithChangeCheck( + pool.Status.Conditions, + hivev1.ClusterPoolInventoryValidCondition, + status, + reason, + message, + updateConditionCheck, + ) + if changed { + pool.Status.Conditions = conds + if err := r.Status().Update(context.Background(), pool); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update 
ClusterPool conditions") + return errors.Wrap(err, "could not update ClusterPool conditions") + } + } + return nil +} + func (r *ReconcileClusterPool) verifyClusterImageSet(pool *hivev1.ClusterPool, logger log.FieldLogger) error { err := r.Get(context.Background(), client.ObjectKey{Name: pool.Spec.ImageSetRef.Name}, &hivev1.ClusterImageSet{}) if err != nil { @@ -1006,9 +1079,280 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg cloudBuilder.Region = platform.Azure.Region cloudBuilder.CloudName = platform.Azure.CloudName return cloudBuilder, nil - // TODO: OpenStack, VMware, and Ovirt. + case platform.OpenStack != nil: + credsSecret, err := r.getCredentialsSecret(pool, platform.OpenStack.CredentialsSecretRef.Name, logger) + if err != nil { + return nil, err + } + cloudBuilder := clusterresource.NewOpenStackCloudBuilderFromSecret(credsSecret) + cloudBuilder.Cloud = platform.OpenStack.Cloud + return cloudBuilder, nil + // TODO: VMware, and Ovirt. default: logger.Info("unsupported platform") return nil, errors.New("unsupported platform") } } + +// updateInventory updates and initializes ClusterPool Status.Inventory and related resources +func (r *ReconcileClusterPool) updateInventory(pool *hivev1.ClusterPool, logger log.FieldLogger) error { + var msg string + var inventoryList []string + valid := 0 + missingCustomizations := false + smallInventory := false + if pool.Status.Inventory == nil { + pool.Status.Inventory = map[string]hivev1.InventoryEntryStatus{} + if err := r.Status().Update(context.TODO(), pool); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to update cluster pool inventory status") + return err + } + } + inventory := pool.Status.Inventory + for _, entry := range pool.Spec.Inventory { + inventoryList = append(inventoryList, entry.Name) + entryStatus := inventory[entry.Name] + // Finding inventory resource + cdc, err := r.getClusterDeploymentCustomization(pool, entry.Name, logger) + if err 
!= nil { + missingCustomizations = true + continue + } + // Initialize status + if entryStatus.Status == "" || entryStatus.Status == "Missing" { + entryStatus.Attempts = defaultInventoryAttempts + entryStatus.Status = "Available" + } + + // Reconcile resource ownership + if (entryStatus.ClusterDeployment != nil && entryStatus.Status != "Reserved") || (entryStatus.ClusterDeployment == nil && entryStatus.Status == "Reserved") { + currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + ) + rCD := &hivev1.ClusterDeployment{} + eCD := &hivev1.ClusterDeployment{} + + if err := r.Get(context.TODO(), types.NamespacedName{Namespace: pool.Namespace, Name: cdc.Status.ClusterDeploymentRef.Name}, rCD); err != nil { + rCD = nil + } + if err := r.Get(context.TODO(), types.NamespacedName{Namespace: pool.Namespace, Name: entryStatus.ClusterDeployment.Name}, eCD); err != nil { + eCD = nil + } + + // Resource claims to be Available + if currentAvailability == nil || currentAvailability.Status == corev1.ConditionTrue { + // either exists then update the resource condition "Available" to false + if rCD != nil || eCD != nil { + if err := r.setClusterDeploymentCustomizationCondition(cdc, false, "Reserved", "Fix Reservation", logger); err != nil { + logger.WithError(err).Error("Failed to fix Cluster Deployment Customization condition to reserved") + } + if rCD == nil && eCD != nil { + cdc.Status.ClusterDeploymentRef = entryStatus.ClusterDeployment + } + } + + } else { // Resource claims to be Reserved + // if both nil, then update resource condition "Available" to true + if rCD == nil && eCD == nil { + if err := r.setClusterDeploymentCustomizationCondition(cdc, true, "Available", "Fix Reservation", logger); err != nil { + logger.WithError(err).Error("Failed to fix Cluster Deployment Customization condition to available") + } + } + } + // if both rCD and eCD exists and they are 
different then Hive is broken: + // set entryStatus.ClusterDeployment to broken and entryStatus.Status to "Unavailable" + if rCD != nil && eCD != nil && rCD != eCD { + entryStatus.Status = "Unavailable" + // TODO: how to set CD to be known as broken? + } + if rCD != nil && eCD == nil { + // if only resource CD exists, and it doesn't belong to this CP then update entryStatus.Status to "Unavailable" + if rCD.Spec.ClusterPoolRef.PoolName != pool.Name { + entryStatus.Status = "Unavailable" + } else { + // if only resource CD exists, and it belongs to this CP then update entryStatus.ClusterDeployemnt with resource CD and Status to "Reserved" + entryStatus.Status = "Reserved" + entryStatus.ClusterDeployment = cdc.Status.ClusterDeploymentRef + } + } + } + + // Version management + entryVesrion := fmt.Sprintf("%x", deephash.Hash(cdc.Spec)) + if entryStatus.Version == "" { + entryStatus.Version = entryVesrion + } + if entryStatus.Version != entryVesrion { + if entryStatus.ClusterDeployment != nil { + entryStatus.Status = "To Be Updated" + } else { + entryStatus.Version = entryVesrion + entryStatus.Status = "Available" + entryStatus.Attempts = defaultInventoryAttempts + } + } + // Attempts handling + if entryStatus.Attempts == 0 { + entryStatus.Status = "Broken by cloud" + } + // valid number update + if entryStatus.Status == "Available" || entryStatus.Status == "Reserved" { + valid += 1 + } + inventory[entry.Name] = entryStatus + + } + // Update deleted entries + for entryName, entryStatus := range pool.Status.Inventory { + deleted := true + for _, entry := range inventoryList { + if entry == entryName { + deleted = false + } + } + if deleted { + entryStatus.Status = "To Be Deleted" + pool.Status.Inventory[entryName] = entryStatus + } + } + // Not enough entries + size := int(pool.Spec.Size) + if pool.Spec.MaxSize != nil { + size = int(*pool.Spec.MaxSize) + } + if valid < size { + smallInventory = true + } + // Update ClusterPool status + pool.Status.Inventory = inventory + 
	if err := r.Status().Update(context.TODO(), pool); err != nil {
+		logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to update cluster pool inventory status")
+		return err
+	}
+	// Update ClusterPool inventory condition
+	if missingCustomizations {
+		msg += "missing resource"
+	}
+	if missingCustomizations {
+		// Inventory is valid as long as it has enough items to meet clusterpool needs; the message contains additional information
+		if err := r.setInventoryValidCondition(pool, smallInventory, msg, logger); err != nil {
+			return err
+		}
+	} else {
+		if err := r.setInventoryValidCondition(pool, true, "valid", logger); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// INFO: [Fairness](https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#fairness)
+// The function loops over the list of inventory items and picks the first available customization.
+// Failing to apply a customization (in any cluster pool) will cause its status to change to unavailable and a new cluster will be queued.
+func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPool, logger log.FieldLogger) (*hivev1.ClusterDeploymentCustomization, error) { + for _, entry := range pool.Spec.Inventory { + entryStatus := pool.Status.Inventory[entry.Name] + if (entry.Kind == hivev1.ClusterDeploymentCustomizationInventoryEntry || entry.Kind == "") && + entryStatus.Status == "Available" && entryStatus.ClusterDeployment == nil { + cdc, err := r.getClusterDeploymentCustomization(pool, entry.Name, logger) + if err != nil { + continue + } + + if err := r.setClusterDeploymentCustomizationCondition(cdc, false, "Reservation", "Reserving cluster deployment customization", logger); err != nil { + continue + } + + if err := r.updateInventoryEntryStatus(pool, entry.Name, "Reserved", logger); err != nil { + log.WithError(err).Error("failed to update inventory status, continue") + } + + return cdc, nil + } + logger.Debugf("customization %s is invalid", entry.Name) + } + return nil, errors.New("no customization available") +} + +func (r *ReconcileClusterPool) getClusterDeploymentCustomization(pool *hivev1.ClusterPool, cdcName string, logger log.FieldLogger) (*hivev1.ClusterDeploymentCustomization, error) { + cdc := &hivev1.ClusterDeploymentCustomization{} + ref := types.NamespacedName{Namespace: pool.Namespace, Name: cdcName} + if err := r.Get(context.TODO(), ref, cdc); err != nil { + r.updateInventoryEntryStatus(pool, cdcName, "Missing", logger) + log.WithError(err).Error("error reading customization") + return nil, err + } + return cdc, nil +} + +func (r *ReconcileClusterPool) setClusterDeploymentCustomizationCondition(cdc *hivev1.ClusterDeploymentCustomization, available bool, reason string, message string, logger log.FieldLogger) error { + condition := corev1.ConditionFalse + if available { + condition = corev1.ConditionTrue + } + cdc.Status.Conditions = controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + 
hivev1.ClusterDeploymentCustomizationAvailableCondition, + condition, + reason, + message, + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + if err := r.Status().Update(context.Background(), cdc); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") + return err + } + return nil +} + +func (r *ReconcileClusterPool) updateInventoryEntryStatus(pool *hivev1.ClusterPool, cdcName, status string, logger log.FieldLogger) error { + entryStatus := pool.Status.Inventory[cdcName] + entryStatus.Status = status + pool.Status.Inventory[cdcName] = entryStatus + if err := r.Status().Update(context.Background(), pool); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to update cluster pool inventory status ") + return err + } + return nil +} + +func applyPatches(patches []hivev1.PatchEntity, data string, logger log.FieldLogger) (string, error) { + targetJson, err := yaml.YAMLToJSON([]byte(data)) + if err != nil { + log.WithError(err).Error("unable to parse install-config template") + return data, err + } + + patchJson, err := json.Marshal(patches) + if err != nil { + log.WithError(err).Error("unable to marshal patches to json") + return data, err + } + + patch, err := jsonpatch.DecodePatch(patchJson) + if err != nil { + log.WithError(err).Error("unable to create json patch") + return data, err + } + + patchedJson, err := patch.Apply(targetJson) + if err != nil { + log.WithError(err).Error("unable to patch install-config template") + return data, err + } + + patchedYaml, _ := yaml.JSONToYAML(patchedJson) + + return string(patchedYaml), nil +} + +func isInstallConfigSecret(obj interface{}) bool { + if secret, ok := obj.(*corev1.Secret); ok { + _, ok := secret.StringData["install-config.yaml"] + if ok { + return true + } + } + return false +} diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go 
b/pkg/controller/clusterpool/clusterpool_controller_test.go index 0b78f50c79a..3266a48c9ff 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -28,6 +28,7 @@ import ( controllerutils "github.com/openshift/hive/pkg/controller/utils" testclaim "github.com/openshift/hive/pkg/test/clusterclaim" testcd "github.com/openshift/hive/pkg/test/clusterdeployment" + testcdc "github.com/openshift/hive/pkg/test/clusterdeploymentcustomization" testcp "github.com/openshift/hive/pkg/test/clusterpool" testgeneric "github.com/openshift/hive/pkg/test/generic" testsecret "github.com/openshift/hive/pkg/test/secret" @@ -38,6 +39,7 @@ const ( testLeasePoolName = "aws-us-east-1" credsSecretName = "aws-creds" imageSetName = "test-image-set" + cdcName = "test-cdc" ) func TestReconcileClusterPool(t *testing.T) { @@ -72,6 +74,20 @@ func TestReconcileClusterPool(t *testing.T) { Status: corev1.ConditionUnknown, Type: hivev1.ClusterPoolAllClustersCurrentCondition, }), + testcp.WithCondition(hivev1.ClusterPoolCondition{ + Status: corev1.ConditionUnknown, + Type: hivev1.ClusterPoolInventoryValidCondition, + }), + ) + + inventoryPoolVersion := "8b918983c1659bbd" + inventroyPoolBuilder := initializedPoolBuilder.Options( + testcp.WithInventory([]hivev1.InventoryEntry{ + { + Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry, + Name: cdcName, + }, + }), ) cdBuilder := func(name string) testcd.Builder { return testcd.FullBuilder(name, name, scheme).Options( @@ -92,6 +108,8 @@ func TestReconcileClusterPool(t *testing.T) { existing []runtime.Object noClusterImageSet bool noCredsSecret bool + noCustomization bool + inventory bool expectError bool expectedTotalClusters int expectedObservedSize int32 @@ -101,6 +119,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedMissingDependenciesStatus corev1.ConditionStatus expectedCapacityStatus corev1.ConditionStatus expectedCDCurrentStatus corev1.ConditionStatus + 
expectedInventoryVaildStatus corev1.ConditionStatus expectedMissingDependenciesMessage string expectedAssignedClaims int expectedUnassignedClaims int @@ -121,6 +140,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedMissingDependenciesStatus: corev1.ConditionUnknown, expectedCapacityStatus: corev1.ConditionUnknown, expectedCDCurrentStatus: corev1.ConditionUnknown, + expectedInventoryVaildStatus: corev1.ConditionUnknown, }, { name: "copyover fields", @@ -166,6 +186,72 @@ func TestReconcileClusterPool(t *testing.T) { }, expectPoolVersionChanged: true, }, + { + name: "poolVersion changes with new Inventory", + existing: []runtime.Object{ + initializedPoolBuilder.Build(testcp.WithInventory( + []hivev1.InventoryEntry{ + { + Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry, + Name: cdcName, + }, + }, + )), + }, + expectPoolVersionChanged: true, + }, + { + name: "poolVersion doens't changes with existing Inventory", + existing: []runtime.Object{ + inventroyPoolBuilder.Build(testcp.WithInventory( + []hivev1.InventoryEntry{ + { + Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry, + Name: "test-cdc-2", + }, + }, + )), + }, + inventory: true, + expectPoolVersionChanged: false, + }, + { + name: "poolVersion doens't changes with existing Inventory 2", + existing: []runtime.Object{ + inventroyPoolBuilder.Build(), + }, + inventory: true, + expectPoolVersionChanged: false, + }, + { + name: "customized clusterpool will creates a cluster", + existing: []runtime.Object{ + inventroyPoolBuilder.Build(testcp.WithSize(1)), + }, + inventory: true, + expectedTotalClusters: 1, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectedInventoryVaildStatus: corev1.ConditionTrue, + }, + { + name: "customized clusterpoll inventory valid", + existing: []runtime.Object{ + inventroyPoolBuilder.Build(testcp.WithSize(1)), + testcd.FullBuilder("c1", "c1", scheme).Build( + testcd.WithPoolVersion(inventoryPoolVersion), + 
testcd.WithPowerState(hivev1.ClusterPowerStateHibernating), + testcd.WithUnclaimedClusterPoolReference(testNamespace, testLeasePoolName), + testcd.WithClusterDeploymentCustomizationReference(cdcName), + testcd.Running(), + ), + }, + inventory: true, + expectedTotalClusters: 1, + expectedObservedSize: 1, + expectedObservedReady: 1, + expectedInventoryVaildStatus: corev1.ConditionTrue, + }, { // This also proves we only delete one stale cluster at a time name: "delete oldest stale cluster first", @@ -1417,6 +1503,12 @@ func TestReconcileClusterPool(t *testing.T) { Build(testsecret.WithDataKeyValue("dummykey", []byte("dummyval"))), ) } + if !test.noCustomization { + test.existing = append( + test.existing, + testcdc.FullBuilder(testNamespace, cdcName, scheme).Build(), + ) + } fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(test.existing...).Build() logger := log.New() logger.SetLevel(log.DebugLevel) @@ -1452,10 +1544,14 @@ func TestReconcileClusterPool(t *testing.T) { assert.Equal(t, test.expectedObservedSize, pool.Status.Size, "unexpected observed size") assert.Equal(t, test.expectedObservedReady, pool.Status.Ready, "unexpected observed ready count") currentPoolVersion := calculatePoolVersion(pool) + expectedPoolVersion := initialPoolVersion + if test.inventory { + expectedPoolVersion = inventoryPoolVersion + } assert.Equal( - t, test.expectPoolVersionChanged, currentPoolVersion != initialPoolVersion, + t, test.expectPoolVersionChanged, currentPoolVersion != expectedPoolVersion, "expectPoolVersionChanged is %t\ninitial %q\nfinal %q", - test.expectPoolVersionChanged, initialPoolVersion, currentPoolVersion) + test.expectPoolVersionChanged, expectedPoolVersion, currentPoolVersion) expectedCDCurrentStatus := test.expectedCDCurrentStatus if expectedCDCurrentStatus == "" { expectedCDCurrentStatus = corev1.ConditionTrue @@ -1486,6 +1582,14 @@ func TestReconcileClusterPool(t *testing.T) { } } + if test.expectedInventoryVaildStatus != "" { + 
inventoryValidCondition := controllerutils.FindClusterPoolCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) + if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") { + assert.Equal(t, test.expectedInventoryVaildStatus, inventoryValidCondition.Status, + "unexpcted InventoryValid condition status") + } + } + cds := &hivev1.ClusterDeploymentList{} err = fakeClient.List(context.Background(), cds) require.NoError(t, err) diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 1c679e913ac..d7d7916c45a 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -263,6 +263,19 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log logger.Infof("Cluster %s is broken due to resume timeout", cd.Name) return true } + if cd.Spec.ClusterPoolRef != nil && cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil { + customizationExists := false + cdcName := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef.Name + for _, entry := range pool.Spec.Inventory { + if cdcName == entry.Name { + customizationExists = true + } + } + if !customizationExists { + logger.Infof("Cluster %s is broken due to removed customization %s", cd.Name, cdcName) + return true + } + } return false } diff --git a/pkg/controller/utils/conditions.go b/pkg/controller/utils/conditions.go index 88e23439a28..66f25d7a362 100644 --- a/pkg/controller/utils/conditions.go +++ b/pkg/controller/utils/conditions.go @@ -522,6 +522,58 @@ func SetMachinePoolCondition( return newConditions } +// SetClusterDeploymentCustomizationCondition sets a condition on a ClusterDeploymentCustomization resource's status +func SetClusterDeploymentCustomizationCondition( + conditions []hivev1.ClusterDeploymentCustomizationCondition, + conditionType hivev1.ClusterDeploymentCustomizationConditionType, + status corev1.ConditionStatus, + reason string, 
+ message string, + updateConditionCheck UpdateConditionCheck, +) []hivev1.ClusterDeploymentCustomizationCondition { + now := metav1.Now() + existingCondition := FindClusterDeploymentCustomizationCondition(conditions, conditionType) + if existingCondition == nil { + if status == corev1.ConditionTrue { + conditions = append( + conditions, + hivev1.ClusterDeploymentCustomizationCondition{ + Type: conditionType, + Status: status, + Reason: reason, + Message: message, + LastTransitionTime: now, + LastProbeTime: now, + }, + ) + } + } else { + if shouldUpdateCondition( + existingCondition.Status, existingCondition.Reason, existingCondition.Message, + status, reason, message, + updateConditionCheck, + ) { + if existingCondition.Status != status { + existingCondition.LastTransitionTime = now + } + existingCondition.Status = status + existingCondition.Reason = reason + existingCondition.Message = message + existingCondition.LastProbeTime = now + } + } + return conditions +} + +func FindClusterDeploymentCustomizationCondition(conditions []hivev1.ClusterDeploymentCustomizationCondition, conditionType hivev1.ClusterDeploymentCustomizationConditionType) *hivev1.ClusterDeploymentCustomizationCondition { + for i, condition := range conditions { + if condition.Type == conditionType { + return &conditions[i] + } + } + return nil +} + // SetMachinePoolConditionWithChangeCheck sets a condition on a MachinePool resource's status. // It returns the conditions as well a boolean indicating whether there was a change made // to the conditions. 
diff --git a/pkg/test/clusterdeployment/clusterdeployment.go b/pkg/test/clusterdeployment/clusterdeployment.go index 8eed570d70b..6c8efca6a5f 100644 --- a/pkg/test/clusterdeployment/clusterdeployment.go +++ b/pkg/test/clusterdeployment/clusterdeployment.go @@ -3,6 +3,7 @@ package clusterdeployment import ( "time" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -220,3 +221,9 @@ func WithAzurePlatform(platform *hivev1azure.Platform) Option { clusterDeployment.Spec.Platform.Azure = platform } } + +func WithClusterDeploymentCustomizationReference(cdcName string) Option { + return func(clusterDeployment *hivev1.ClusterDeployment) { + clusterDeployment.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef = &corev1.LocalObjectReference{Name: cdcName} + } +} diff --git a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..db2f28dd60b --- /dev/null +++ b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go @@ -0,0 +1,73 @@ +package clusterdeploymentcustomization + +import ( + "k8s.io/apimachinery/pkg/runtime" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + + "github.com/openshift/hive/pkg/test/generic" +) + +// Option defines a function signature for any function that wants to be passed into Build +type Option func(*hivev1.ClusterDeploymentCustomization) + +// Build runs each of the functions passed in to generate the object. 
+func Build(opts ...Option) *hivev1.ClusterDeploymentCustomization { + retval := &hivev1.ClusterDeploymentCustomization{} + for _, o := range opts { + o(retval) + } + + return retval +} + +type Builder interface { + Build(opts ...Option) *hivev1.ClusterDeploymentCustomization + + Options(opts ...Option) Builder + + GenericOptions(opts ...generic.Option) Builder +} + +func BasicBuilder() Builder { + return &builder{} +} + +func FullBuilder(namespace, name string, typer runtime.ObjectTyper) Builder { + b := &builder{} + return b.GenericOptions( + generic.WithTypeMeta(typer), + generic.WithResourceVersion("1"), + generic.WithNamespace(namespace), + generic.WithName(name), + ) +} + +type builder struct { + options []Option +} + +func (b *builder) Build(opts ...Option) *hivev1.ClusterDeploymentCustomization { + return Build(append(b.options, opts...)...) +} + +func (b *builder) Options(opts ...Option) Builder { + return &builder{ + options: append(b.options, opts...), + } +} + +func (b *builder) GenericOptions(opts ...generic.Option) Builder { + options := make([]Option, len(opts)) + for i, o := range opts { + options[i] = Generic(o) + } + return b.Options(options...) 
+} + +// Generic allows common functions applicable to all objects to be used as Options to Build +func Generic(opt generic.Option) Option { + return func(clusterDeployment *hivev1.ClusterDeploymentCustomization) { + opt(clusterDeployment) + } +} diff --git a/pkg/test/clusterpool/clusterpool.go b/pkg/test/clusterpool/clusterpool.go index 8d156a4ce55..cf3d3559276 100644 --- a/pkg/test/clusterpool/clusterpool.go +++ b/pkg/test/clusterpool/clusterpool.go @@ -184,3 +184,9 @@ func WithRunningCount(size int) Option { clusterPool.Spec.RunningCount = int32(size) } } + +func WithInventory(inventory []hivev1.InventoryEntry) Option { + return func(clusterPool *hivev1.ClusterPool) { + clusterPool.Spec.Inventory = inventory + } +} diff --git a/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go new file mode 100644 index 00000000000..bfb7069bd54 --- /dev/null +++ b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go @@ -0,0 +1,285 @@ +package v1 + +import ( + "fmt" + "net/http" + "strings" + + log "github.com/sirupsen/logrus" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/apis/hive/v1" +) + +const ( + clusterDeploymentCustomizationGroup = "hive.openshift.io" + clusterDeploymentCustomizationVersion = "v1" + clusterDeploymentCustomizationResource = "clusterdeploymentcustomization" + + clusterDeploymentCustomizationAdmissionGroup = "admission.hive.openshift.io" + clusterDeploymentCustomizationAdmissionVersion = "v1" +) + +// 
ClusterDeploymentCustomizationValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. +type ClusterDeploymentCustomizationValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewClusterDeploymentCustomizationValidatingAdmissionHook constructs a new ClusterDeploymentCustomizationValidatingAdmissionHook +func NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder *admission.Decoder) *ClusterDeploymentCustomizationValidatingAdmissionHook { + return &ClusterDeploymentCustomizationValidatingAdmissionHook{ + decoder: decoder, + } +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. +// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterdeploymentcustomizationvalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": clusterDeploymentCustomizationAdmissionGroup, + "version": clusterDeploymentCustomizationAdmissionVersion, + "resource": "clusterdeploymentcustomizationvalidator", + }).Info("Registering validation REST resource") + + // NOTE: This GVR is meant to be different than the ClusterDeploymentCustomization CRD GVR which has group "hive.openshift.io". 
+ return schema.GroupVersionResource{ + Group: clusterDeploymentCustomizationAdmissionGroup, + Version: clusterDeploymentCustomizationAdmissionVersion, + Resource: "clusterdeploymentcustomizationvalidators", + }, + "clusterdeploymentcustomizationvalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": clusterDeploymentCustomizationAdmissionGroup, + "version": clusterDeploymentCustomizationAdmissionVersion, + "resource": "clusterdeploymentcustomizationvalidator", + }).Info("Initializing validation REST resource") + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(admissionSpec) { + contextLogger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's Allowed. 
+ return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + contextLogger.Info("Validating request") + + switch admissionSpec.Operation { + case admissionv1beta1.Create: + return a.validateCreate(admissionSpec) + case admissionv1beta1.Update: + return a.validateUpdate(admissionSpec) + default: + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } +} + +// shouldValidate explicitly checks if the request should be validated. For example, this webhook may have accidentally been registered to check +// the validity of some other type of object with a different GVR. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "shouldValidate", + }) + + if admissionSpec.Resource.Group != clusterDeploymentCustomizationGroup { + contextLogger.Info("Returning False, not our group") + return false + } + + if admissionSpec.Resource.Version != clusterDeploymentCustomizationVersion { + contextLogger.Info("Returning False, it's our group, but not the right version") + return false + } + + if admissionSpec.Resource.Resource != clusterDeploymentCustomizationResource { + contextLogger.Info("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. + contextLogger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreate specifically validates create operations for ClusterDeploymentCustomization objects. 
+func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateCreate", + }) + + cdc := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, cdc); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = cdc.Name + + // TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd) + + if len(cdc.Name) > validation.DNS1123LabelMaxLength { + message := fmt.Sprintf("Invalid cluster deployment customization name (.meta.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength)) + contextLogger.Error(message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateInstallConfigPatches(specPath.Child("installConfigPatches"), cdc.Spec.InstallConfigPatches)...) + + if len(allErrs) > 0 { + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. 
+ contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdate specifically validates update operations for ClusterDeploymentCustomization objects. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + oldObject := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { + contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["oldObject.Name"] = oldObject.Name + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateInstallConfigPatches(specPath, newObject.Spec.InstallConfigPatches)...) 
+ + if len(allErrs) > 0 { + contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func validateInstallConfigPatches(path *field.Path, patches []hivev1.PatchEntity) field.ErrorList { + allErrs := field.ErrorList{} + + for i, patch := range patches { + if !isValidOP(patch.Op) { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch op must be a valid json patch operation")) + } + if len(patch.Path) == 0 || !strings.HasPrefix(patch.Path, "/") { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch path must start with '/'")) + } + } + return allErrs +} + +func isValidOP(op string) bool { + switch op { + case + "replace", + "add", + "remove", + "test", + "copy", + "move": + return true + } + return false +} diff --git a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go index 1302c8dbad0..47d706fee15 100644 --- a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go @@ -179,6 +179,10 @@ func (a *ClusterPoolValidatingAdmissionHook) validateCreate(admissionSpec *admis allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + if newObject.Spec.Inventory != nil { + allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...) 
+ } + if len(allErrs) > 0 { status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() return &admissionv1beta1.AdmissionResponse{ @@ -239,6 +243,10 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + if newObject.Spec.Inventory != nil { + allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...) + } + if len(allErrs) > 0 { contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() @@ -254,3 +262,11 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis Allowed: true, } } + +func validateInventory(path *field.Path, inventory []hivev1.InventoryEntry) field.ErrorList { + allErrs := field.ErrorList{} + if len(inventory) == 0 { + allErrs = append(allErrs, field.Invalid(path, inventory, "inventory can't be empty")) + } + return allErrs +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go index 3fe46701981..948e15fa4d0 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go @@ -50,6 +50,10 @@ const ( // FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster // secret before cleaning up the API object. 
FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster" + + // FinalizerCustomizationRelease is used on ClusterDeployments to ensure we run a successful + // release of ClusterDeploymentCustomization + FinalizerCustomizationRelease string = "hive.openshift.io/customization" ) // ClusterPowerState is used to indicate whether a cluster is running or in a @@ -271,6 +275,8 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` + // ClusterDeploymentCustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment + ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization"` } // ClusterMetadata contains metadata information about the installed cluster. diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go new file mode 100644 index 00000000000..ec01b6cffad --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -0,0 +1,90 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API +// +kubebuilder:subresource:status +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Namespaced +type ClusterDeploymentCustomization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterDeploymentCustomizationSpec `json:"spec"` + Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"` +} + +// 
ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationSpec struct { + // TODO: documentation + InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` +} + +// TODO: documentation +type PatchEntity struct { + // +required + Op string `json:"op"` + // +required + Path string `json:"path"` + // +required + Value string `json:"value"` +} + +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationStatus struct { + // TODO: documentation + // +optional + ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"` + + // Conditions includes more detailed status for the cluster deployment customization status. + // +optional + Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"` +} + +type ClusterDeploymentCustomizationCondition struct { + // Type is the type of the condition. + Type ClusterDeploymentCustomizationConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. 
+ // +optional + Message string `json:"message,omitempty"` +} + +// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type +type ClusterDeploymentCustomizationConditionType string + +const ( + // TODO: add more types + // TODO: shorter name? + ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomizationList contains the list of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ClusterDeploymentCustomization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{}) +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go index 06bde7e10f8..79caaa3beef 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go @@ -92,6 +92,18 @@ type ClusterPoolSpec struct { // HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool. // +optional HibernationConfig *HibernationConfig `json:"hibernationConfig"` + + // Inventory maintains a list of entries consumed by the clusterpool + // to customize the default cluster deployment + // +optional + Inventory []InventoryEntry `json:"inventory,omitempty"` + + // InventoryAttempts is the number of attempts to provision a ClusterDeployment with a given inventory entry. + // On a successful provision, the inventory entry attempts status is updated to this value. + // Negative InventoryAttempts means unlimited attempts, and is recommended only for debugging purposes. + // Default number of InventoryAttempts is 5. 
+ // +optional + InventoryAttempts *int32 `json:"inventoryAttempts,omitempty"` } type HibernationConfig struct { @@ -110,6 +122,22 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } +// InventoryEntryKind is the Kind of the inventory entry +// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization +type InventoryEntryKind string + +const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization" + +// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment +type InventoryEntry struct { + // Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value. + // +optional + Kind InventoryEntryKind `json:"kind,omitempty"` + // Name is the name of the referenced resource. + // +required + Name string `json:"name,omitempty"` +} + // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool. type ClusterPoolClaimLifetime struct { // Default is the default lifetime of the claim when no lifetime is set on the claim itself. @@ -152,6 +180,10 @@ type ClusterPoolStatus struct { // Conditions includes more detailed status for the cluster pool // +optional Conditions []ClusterPoolCondition `json:"conditions,omitempty"` + + // Inventory tracks the individual status of inventory entries + // +optional + Inventory map[string]InventoryEntryStatus `json:"inventory,omitempty"` } // ClusterPoolCondition contains details for the current condition of a cluster pool @@ -187,8 +219,24 @@ const ( // ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready) // ClusterDeployments in the pool match the current configuration of the ClusterPool. 
ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent" + // ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid + ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" ) +// InventoryEntryStatus contains current details for the related Inventory entry +type InventoryEntryStatus struct { + // Inventory Entry Kind + Kind string `json:"kind"` + // Version of resource instance + Version string `json:"version,omitempty"` + // Status of inventory resource for the cluster pool + Status string `json:"status"` + // ClusterDeployment is a reference to ClusterDeployment using the Inventory resource + ClusterDeployment *corev1.LocalObjectReference `json:"clusterDeployment,omitempty"` + // Attempts left to try using inventory entry + Attempts int32 `json:"attempts,omitempty"` +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index dfdd9fbed1b..5b1cebdf821 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -656,6 +656,134 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. 
+func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. +func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. 
+func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. 
+func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1316,6 +1444,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.ClusterDeploymentCustomizationRef != nil { + in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1388,6 +1521,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(HibernationConfig) **out = **in } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2470,6 +2608,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. +func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2954,6 +3108,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. +func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in