diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go index ff3421b9b3b..cd72af6555b 100644 --- a/apis/hive/v1/clusterdeployment_types.go +++ b/apis/hive/v1/clusterdeployment_types.go @@ -49,6 +49,10 @@ const ( // FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster // secret before cleaning up the API object. FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster" + + // FinalizerCustomizationRelease is used on ClusterDeployments to ensure we run a successful + // release of ClusterDeploymentCustomization + FinalizerCustomizationRelease string = "hive.openshift.io/customization" ) // ClusterPowerState is used to indicate whether a cluster is running or in a @@ -266,6 +270,8 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` + // ClusterDeploymentCustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment + ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"` } // ClusterMetadata contains metadata information about the installed cluster. 
diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go new file mode 100644 index 00000000000..ec01b6cffad --- /dev/null +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -0,0 +1,90 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API +// +kubebuilder:subresource:status +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Namespaced +type ClusterDeploymentCustomization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterDeploymentCustomizationSpec `json:"spec"` + Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"` +} + +// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationSpec struct { + // TODO: documentation + InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` +} + +// TODO: documentation +type PatchEntity struct { + // +required + Op string `json:"op"` + // +required + Path string `json:"path"` + // +required + Value string `json:"value"` +} + +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationStatus struct { + // TODO: documentation + // +optional + ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"` + + // Conditions includes more detailed status for the cluster deployment customization status. + // +optional + Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"` +} + +type ClusterDeploymentCustomizationCondition struct { + // Type is the type of the condition. 
+ Type ClusterDeploymentCustomizationConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type +type ClusterDeploymentCustomizationConditionType string + +const ( + // TODO: add more types + // TODO: shorter name? + ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomizationList contains the list of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ClusterDeploymentCustomization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{}) +} diff --git a/apis/hive/v1/clusterpool_types.go b/apis/hive/v1/clusterpool_types.go index 245492d285d..32cf917d66f 100644 --- a/apis/hive/v1/clusterpool_types.go +++ b/apis/hive/v1/clusterpool_types.go @@ -88,6 +88,13 @@ type ClusterPoolSpec struct { // HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool. 
// +optional HibernationConfig *HibernationConfig `json:"hibernationConfig"` + + // Inventory customizes the cluster deployment inventory. + // TODO: improve Inventory description. + // Inventory maintains a list entries consumed by the clusterpool + // to customize the default the cluster deployment + // +optional + Inventory []InventoryEntry `json:"inventory,omitempty"` } type HibernationConfig struct { @@ -99,6 +106,22 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } +// InventoryEntryKind is the Kind of the inventory entry +// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization +type InventoryEntryKind string + +const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization" + +// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment +type InventoryEntry struct { + // Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value. + // +optional + Kind InventoryEntryKind `json:"kind,omitempty"` + // Name is the name of the referenced resource. + // +required + Name string `json:"name,omitempty"` +} + // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool. type ClusterPoolClaimLifetime struct { // Default is the default lifetime of the claim when no lifetime is set on the claim itself. @@ -168,6 +191,8 @@ const ( // ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready) // ClusterDeployments in the pool match the current configuration of the ClusterPool. 
ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent" + // ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid + ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" ) // +genclient diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go index f460058df33..4b21c871601 100644 --- a/apis/hive/v1/zz_generated.deepcopy.go +++ b/apis/hive/v1/zz_generated.deepcopy.go @@ -654,6 +654,134 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. +func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. +func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. +func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. +func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1309,6 +1437,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.ClusterDeploymentCustomizationRef != nil { + in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1376,6 +1509,16 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(ClusterPoolClaimLifetime) (*in).DeepCopyInto(*out) } + if in.HibernationConfig != nil { + in, out := &in.HibernationConfig, &out.HibernationConfig + *out = new(HibernationConfig) + **out = **in + } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2234,6 +2377,23 @@ func (in *GCPDNSZoneStatus) DeepCopy() *GCPDNSZoneStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HibernationConfig) DeepCopyInto(out *HibernationConfig) { + *out = *in + out.ResumeTimeout = in.ResumeTimeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationConfig. +func (in *HibernationConfig) DeepCopy() *HibernationConfig { + if in == nil { + return nil + } + out := new(HibernationConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HiveConfig) DeepCopyInto(out *HiveConfig) { *out = *in @@ -2424,6 +2584,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. +func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2903,6 +3079,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. +func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Platform) DeepCopyInto(out *Platform) { *out = *in diff --git a/cmd/hiveadmission/main.go b/cmd/hiveadmission/main.go index 3c8c243f23f..45fe777814c 100644 --- a/cmd/hiveadmission/main.go +++ b/cmd/hiveadmission/main.go @@ -30,6 +30,7 @@ func main() { hivevalidatingwebhooks.NewMachinePoolValidatingAdmissionHook(decoder), hivevalidatingwebhooks.NewSyncSetValidatingAdmissionHook(decoder), hivevalidatingwebhooks.NewSelectorSyncSetValidatingAdmissionHook(decoder), + hivevalidatingwebhooks.NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder), ) } diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml new file mode 100644 index 00000000000..bc679c087bb --- /dev/null +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -0,0 +1,115 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io +spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired state + of ClusterDeploymentCustomization + properties: + installConfigPatches: + description: 'TODO: documentation' + items: + description: 'TODO: documentation' + properties: + op: + type: string + path: + type: string + value: + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization + properties: + clusterDeploymentRef: + description: 'TODO: documentation' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + conditions: + description: Conditions includes more detailed status for the cluster + deployment customization status. + items: + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating + details about last transition. + type: string + reason: + description: Reason is a unique, one-word, CamelCase reason + for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. 
+ type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index b3c5eae56a6..07202101ebb 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -187,6 +196,15 @@ spec: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: ClusterDeploymentCustomizationRef is the ClusterPool + Inventory claimed customization for this ClusterDeployment + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml index 985699519b5..a1493e3bbb6 100644 --- a/config/crds/hive.openshift.io_clusterpools.yaml +++ b/config/crds/hive.openshift.io_clusterpools.yaml @@ -138,6 +138,28 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + inventory: + description: 'Inventory customizes the cluster deployment inventory. + TODO: improve Inventory description. 
Inventory maintains a list + entries consumed by the clusterpool to customize the default the + cluster deployment' + items: + description: InventoryEntry maintains a reference to a custom resource + consumed by a clusterpool to customize the cluster deployment + properties: + kind: + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is also + currently the only supported value. + enum: + - "" + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. + type: string + type: object + type: array labels: additionalProperties: type: string diff --git a/hack/app-sre/kustomization.yaml b/hack/app-sre/kustomization.yaml index 5941b588da1..9c3ff4d672f 100644 --- a/hack/app-sre/kustomization.yaml +++ b/hack/app-sre/kustomization.yaml @@ -29,6 +29,7 @@ resources: - ../../config/crds/hive.openshift.io_selectorsyncsets.yaml - ../../config/crds/hive.openshift.io_syncidentityproviders.yaml - ../../config/crds/hive.openshift.io_syncsets.yaml +- ../../config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization # Use app-sre-supplied variables to pull the image for the current commit diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index 84e5f29066a..f742dbd9ea9 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -249,6 +249,123 @@ objects: plural: '' conditions: [] storedVersions: [] +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io + spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + 
scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired + state of ClusterDeploymentCustomization + properties: + installConfigPatches: + description: 'TODO: documentation' + items: + description: 'TODO: documentation' + properties: + op: + type: string + path: + type: string + value: + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization + properties: + clusterDeploymentRef: + description: 'TODO: documentation' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + conditions: + description: Conditions includes more detailed status for the cluster + deployment customization status. + items: + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the + condition. 
+ format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating + details about last transition. + type: string + reason: + description: Reason is a unique, one-word, CamelCase reason + for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -440,6 +557,15 @@ objects: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: ClusterDeploymentCustomizationRef is the ClusterPool + Inventory claimed customization for this ClusterDeployment + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. @@ -1832,6 +1958,29 @@ objects: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + inventory: + description: 'Inventory customizes the cluster deployment inventory. + TODO: improve Inventory description. 
Inventory maintains a list + entries consumed by the clusterpool to customize the default the + cluster deployment' + items: + description: InventoryEntry maintains a reference to a custom + resource consumed by a clusterpool to customize the cluster + deployment + properties: + kind: + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is + also currently the only supported value. + enum: + - '' + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. + type: string + type: object + type: array labels: additionalProperties: type: string diff --git a/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..dd7ce990a81 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/hive/apis/hive/v1" + scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterDeploymentCustomizationsGetter has a method to return a ClusterDeploymentCustomizationInterface. +// A group's client should implement this interface. +type ClusterDeploymentCustomizationsGetter interface { + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface +} + +// ClusterDeploymentCustomizationInterface has methods to work with ClusterDeploymentCustomization resources. 
+type ClusterDeploymentCustomizationInterface interface { + Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (*v1.ClusterDeploymentCustomization, error) + Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (*v1.ClusterDeploymentCustomization, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterDeploymentCustomization, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterDeploymentCustomizationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) + ClusterDeploymentCustomizationExpansion +} + +// clusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type clusterDeploymentCustomizations struct { + client rest.Interface + ns string +} + +// newClusterDeploymentCustomizations returns a ClusterDeploymentCustomizations +func newClusterDeploymentCustomizations(c *HiveV1Client, namespace string) *clusterDeploymentCustomizations { + return &clusterDeploymentCustomizations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. +func (c *clusterDeploymentCustomizations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("clusterdeploymentcustomizations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *clusterDeploymentCustomizations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterDeploymentCustomizationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterDeploymentCustomizationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *clusterDeploymentCustomizations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *clusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Post(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *clusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(clusterDeploymentCustomization.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *clusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. 
+func (c *clusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go new file mode 100644 index 00000000000..be7861682b5 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go @@ -0,0 +1,114 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type FakeClusterDeploymentCustomizations struct { + Fake *FakeHiveV1 + ns string +} + +var clusterdeploymentcustomizationsResource = schema.GroupVersionResource{Group: "hive.openshift.io", Version: "v1", Resource: "clusterdeploymentcustomizations"} + +var clusterdeploymentcustomizationsKind = schema.GroupVersionKind{Group: "hive.openshift.io", Version: "v1", Kind: "ClusterDeploymentCustomization"} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. 
+func (c *FakeClusterDeploymentCustomizations) Get(ctx context.Context, name string, options v1.GetOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(clusterdeploymentcustomizationsResource, c.ns, name), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *FakeClusterDeploymentCustomizations) List(ctx context.Context, opts v1.ListOptions) (result *hivev1.ClusterDeploymentCustomizationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(clusterdeploymentcustomizationsResource, clusterdeploymentcustomizationsKind, c.ns, opts), &hivev1.ClusterDeploymentCustomizationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &hivev1.ClusterDeploymentCustomizationList{ListMeta: obj.(*hivev1.ClusterDeploymentCustomizationList).ListMeta} + for _, item := range obj.(*hivev1.ClusterDeploymentCustomizationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *FakeClusterDeploymentCustomizations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(clusterdeploymentcustomizationsResource, c.ns, opts)) + +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. 
+func (c *FakeClusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.CreateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *FakeClusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.UpdateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *FakeClusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(clusterdeploymentcustomizationsResource, c.ns, name), &hivev1.ClusterDeploymentCustomization{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeClusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(clusterdeploymentcustomizationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &hivev1.ClusterDeploymentCustomizationList{}) + return err +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. +func (c *FakeClusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(clusterdeploymentcustomizationsResource, c.ns, name, pt, data, subresources...), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go index d52a518c108..06f12520fe7 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go @@ -24,6 +24,10 @@ func (c *FakeHiveV1) ClusterDeployments(namespace string) v1.ClusterDeploymentIn return &FakeClusterDeployments{c, namespace} } +func (c *FakeHiveV1) ClusterDeploymentCustomizations(namespace string) v1.ClusterDeploymentCustomizationInterface { + return &FakeClusterDeploymentCustomizations{c, namespace} +} + func (c *FakeHiveV1) ClusterDeprovisions(namespace string) v1.ClusterDeprovisionInterface { return &FakeClusterDeprovisions{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go index 600401a271d..951ab87652b 100644 --- 
a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go @@ -8,6 +8,8 @@ type ClusterClaimExpansion interface{} type ClusterDeploymentExpansion interface{} +type ClusterDeploymentCustomizationExpansion interface{} + type ClusterDeprovisionExpansion interface{} type ClusterImageSetExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go index a80c6432e43..a123619ce71 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go @@ -13,6 +13,7 @@ type HiveV1Interface interface { CheckpointsGetter ClusterClaimsGetter ClusterDeploymentsGetter + ClusterDeploymentCustomizationsGetter ClusterDeprovisionsGetter ClusterImageSetsGetter ClusterPoolsGetter @@ -46,6 +47,10 @@ func (c *HiveV1Client) ClusterDeployments(namespace string) ClusterDeploymentInt return newClusterDeployments(c, namespace) } +func (c *HiveV1Client) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface { + return newClusterDeploymentCustomizations(c, namespace) +} + func (c *HiveV1Client) ClusterDeprovisions(namespace string) ClusterDeprovisionInterface { return newClusterDeprovisions(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 71f7068525e..296f8270454 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -44,6 +44,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterClaims().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeployments"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Hive().V1().ClusterDeployments().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clusterdeploymentcustomizations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeploymentCustomizations().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeprovisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeprovisions().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterimagesets"): diff --git a/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..637b3b707f7 --- /dev/null +++ b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + versioned "github.com/openshift/hive/pkg/client/clientset/versioned" + internalinterfaces "github.com/openshift/hive/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/hive/pkg/client/listers/hive/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationInformer provides access to a shared informer and lister for +// ClusterDeploymentCustomizations. 
+type ClusterDeploymentCustomizationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterDeploymentCustomizationLister +} + +type clusterDeploymentCustomizationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).Watch(context.TODO(), options) + }, + }, + &hivev1.ClusterDeploymentCustomization{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterDeploymentCustomizationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterDeploymentCustomizationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&hivev1.ClusterDeploymentCustomization{}, f.defaultInformer) +} + +func (f *clusterDeploymentCustomizationInformer) Lister() v1.ClusterDeploymentCustomizationLister { + return v1.NewClusterDeploymentCustomizationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/hive/v1/interface.go b/pkg/client/informers/externalversions/hive/v1/interface.go index d73a5da9ab5..0659201eae6 100644 --- a/pkg/client/informers/externalversions/hive/v1/interface.go +++ b/pkg/client/informers/externalversions/hive/v1/interface.go @@ -14,6 +14,8 @@ type Interface interface { ClusterClaims() ClusterClaimInformer // ClusterDeployments returns a 
ClusterDeploymentInformer. ClusterDeployments() ClusterDeploymentInformer + // ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. + ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer // ClusterDeprovisions returns a ClusterDeprovisionInformer. ClusterDeprovisions() ClusterDeprovisionInformer // ClusterImageSets returns a ClusterImageSetInformer. @@ -70,6 +72,11 @@ func (v *version) ClusterDeployments() ClusterDeploymentInformer { return &clusterDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. +func (v *version) ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer { + return &clusterDeploymentCustomizationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ClusterDeprovisions returns a ClusterDeprovisionInformer. func (v *version) ClusterDeprovisions() ClusterDeprovisionInformer { return &clusterDeprovisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..dc1ff4923ad --- /dev/null +++ b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/hive/apis/hive/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationLister helps list ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. +type ClusterDeploymentCustomizationLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister + ClusterDeploymentCustomizationListerExpansion +} + +// clusterDeploymentCustomizationLister implements the ClusterDeploymentCustomizationLister interface. +type clusterDeploymentCustomizationLister struct { + indexer cache.Indexer +} + +// NewClusterDeploymentCustomizationLister returns a new ClusterDeploymentCustomizationLister. +func NewClusterDeploymentCustomizationLister(indexer cache.Indexer) ClusterDeploymentCustomizationLister { + return &clusterDeploymentCustomizationLister{indexer: indexer} +} + +// List lists all ClusterDeploymentCustomizations in the indexer. +func (s *clusterDeploymentCustomizationLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. +func (s *clusterDeploymentCustomizationLister) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister { + return clusterDeploymentCustomizationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ClusterDeploymentCustomizationNamespaceLister helps list and get ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. +type ClusterDeploymentCustomizationNamespaceLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ClusterDeploymentCustomization, error) + ClusterDeploymentCustomizationNamespaceListerExpansion +} + +// clusterDeploymentCustomizationNamespaceLister implements the ClusterDeploymentCustomizationNamespaceLister +// interface. +type clusterDeploymentCustomizationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. +func (s clusterDeploymentCustomizationNamespaceLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. +func (s clusterDeploymentCustomizationNamespaceLister) Get(name string) (*v1.ClusterDeploymentCustomization, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterdeploymentcustomization"), name) + } + return obj.(*v1.ClusterDeploymentCustomization), nil +} diff --git a/pkg/client/listers/hive/v1/expansion_generated.go b/pkg/client/listers/hive/v1/expansion_generated.go index 2f913b5fa55..b07f9b98377 100644 --- a/pkg/client/listers/hive/v1/expansion_generated.go +++ b/pkg/client/listers/hive/v1/expansion_generated.go @@ -26,6 +26,14 @@ type ClusterDeploymentListerExpansion interface{} // ClusterDeploymentNamespaceLister. 
type ClusterDeploymentNamespaceListerExpansion interface{} +// ClusterDeploymentCustomizationListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationLister. +type ClusterDeploymentCustomizationListerExpansion interface{} + +// ClusterDeploymentCustomizationNamespaceListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationNamespaceLister. +type ClusterDeploymentCustomizationNamespaceListerExpansion interface{} + // ClusterDeprovisionListerExpansion allows custom methods to be added to // ClusterDeprovisionLister. type ClusterDeprovisionListerExpansion interface{} diff --git a/pkg/clusterresource/openstack.go b/pkg/clusterresource/openstack.go index ee6be5fa338..f0cbc53d39d 100644 --- a/pkg/clusterresource/openstack.go +++ b/pkg/clusterresource/openstack.go @@ -43,6 +43,13 @@ type OpenStackCloudBuilder struct { MasterFlavor string } +func NewOpenStackCloudBuilderFromSecret(credsSecret *corev1.Secret) *OpenStackCloudBuilder { + cloudsYamlContent := credsSecret.Data[constants.OpenStackCredentialsName] + return &OpenStackCloudBuilder{ + CloudsYAMLContent: cloudsYamlContent, + } +} + func (p *OpenStackCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 35140916044..d11fa95a718 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -507,7 +507,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi } if cd.DeletionTimestamp != nil { - if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) { + if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) && !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { // Make sure we have no 
deprovision underway metric even though this was probably cleared when we // removed the finalizer. clearDeprovisionUnderwaySecondsMetric(cd, cdLog) @@ -569,6 +569,16 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi return reconcile.Result{}, nil } + if cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef != nil { + if !controllerutils.HasFinalizer(cd, hivev1.FinalizerCustomizationRelease) { + cdLog.Debugf("adding customization finalizer") + if err := r.addClusterDeploymentCustomizationFinalizer(cd); err != nil { + cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer") + return reconcile.Result{}, err + } + } + } + if cd.Spec.ManageDNS { dnsZone, err := r.ensureManagedDNSZone(cd, cdLog) if err != nil { @@ -1364,6 +1374,11 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu case !dnsZoneGone: return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil default: + cdLog.Infof("Deprovision request completed, releasing inventory customization") + if err := r.releaseClusterDeploymentCustomization(cd, cdLog); err != nil { + cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error releasing inventory customization") + return reconcile.Result{}, err + } cdLog.Infof("DNSZone gone and deprovision request completed, removing finalizer") if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer") @@ -1379,6 +1394,12 @@ func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.Cl return r.Update(context.TODO(), cd) } +func (r *ReconcileClusterDeployment) addClusterDeploymentCustomizationFinalizer(cd *hivev1.ClusterDeployment) error { + cd = cd.DeepCopy() + controllerutils.AddFinalizer(cd, hivev1.FinalizerCustomizationRelease) + return r.Update(context.TODO(), cd) +} + func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd 
*hivev1.ClusterDeployment, cdLog log.FieldLogger) error { cd = cd.DeepCopy() @@ -1395,6 +1416,42 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1 return nil } +func (r *ReconcileClusterDeployment) releaseClusterDeploymentCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { + cd = cd.DeepCopy() + customizationRef := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef + namespace := types.NamespacedName{Namespace: cd.Namespace, Name: customizationRef.Name} + cdc := &hivev1.ClusterDeploymentCustomization{} + err := r.Get(context.TODO(), namespace, cdc) + if err != nil { + if apierrors.IsNotFound(err) { + cdLog.Infof("customization not found: %s", customizationRef.Name) + } + log.WithError(err).Error("error reading customization") + return err + } + + cdc.Status.ClusterDeploymentRef = nil + cdc.Status.Conditions = controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + corev1.ConditionTrue, + "ClusterDeploymentCustomizationReleased", + "Cluster Deployment Customization was released", + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + + if err := r.Update(context.TODO(), cdc); err != nil { + return err + } + + controllerutils.DeleteFinalizer(cd, hivev1.FinalizerCustomizationRelease) + if err := r.Update(context.TODO(), cd); err != nil { + return err + } + + return nil +} + // setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation // to when the dnszone became ready, and set a metric to report the delay. // Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered. 
diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index a7076201178..04b141f3fa6 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -2,16 +2,19 @@ package clusterpool import ( "context" + "encoding/json" "fmt" "math" "reflect" "sort" + "github.com/ghodss/yaml" "github.com/pkg/errors" log "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "github.com/davegardnerisme/deephash" + jsonpatch "github.com/evanphx/json-patch" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -263,6 +266,23 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, nil } + // Initialize cluster pool inventory conditions if not set + for _, entry := range clp.Spec.Inventory { + cdc := &hivev1.ClusterDeploymentCustomization{} + ref := types.NamespacedName{Namespace: clp.Namespace, Name: entry.Name} + r.Get(context.TODO(), ref, cdc) + if len(cdc.Status.Conditions) == 0 { + controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + corev1.ConditionUnknown, + hivev1.InitializedConditionReason, + "condition initialized", + controllerutils.UpdateConditionAlways, + ) + } + } + // If the pool is deleted, clear finalizer once all ClusterDeployments have been deleted. if clp.DeletionTimestamp != nil { return reconcile.Result{}, r.reconcileDeletedPool(clp, logger) @@ -480,6 +500,9 @@ func calculatePoolVersion(clp *hivev1.ClusterPool) string { ba = append(ba, deephash.Hash(clp.Spec.BaseDomain)...) ba = append(ba, deephash.Hash(clp.Spec.ImageSetRef)...) ba = append(ba, deephash.Hash(clp.Spec.InstallConfigSecretTemplateRef)...) + if clp.Spec.Inventory != nil { + ba = append(ba, []byte("hasInventory")...) 
+ } // Hash of hashes to ensure fixed length return fmt.Sprintf("%x", deephash.Hash(ba)) } @@ -667,6 +690,15 @@ func (r *ReconcileClusterPool) createCluster( } annotations[constants.ClusterDeploymentPoolSpecHashAnnotation] = poolVersion + cdc := &hivev1.ClusterDeploymentCustomization{} + if clp.Spec.Inventory != nil { + cdc, err = r.getInventoryCustomization(clp, logger) + if err != nil { + logger.WithError(err).Error("error obtaining inventory customization") + return nil, err + } + } + // We will use this unique random namespace name for our cluster name. builder := &clusterresource.Builder{ Name: ns.Name, @@ -696,6 +728,7 @@ func (r *ReconcileClusterPool) createCluster( poolKey := types.NamespacedName{Namespace: clp.Namespace, Name: clp.Name}.String() r.expectations.ExpectCreations(poolKey, 1) var cd *hivev1.ClusterDeployment + var ics *corev1.Secret // Add the ClusterPoolRef to the ClusterDeployment, and move it to the end of the slice. for i, obj := range objs { var ok bool @@ -704,10 +737,39 @@ func (r *ReconcileClusterPool) createCluster( continue } poolRef := poolReference(clp) + if clp.Spec.Inventory != nil { + poolRef.ClusterDeploymentCustomizationRef = &corev1.LocalObjectReference{Name: cdc.Name} + } cd.Spec.ClusterPoolRef = &poolRef lastIndex := len(objs) - 1 objs[i], objs[lastIndex] = objs[lastIndex], objs[i] } + // Apply inventory customization + if clp.Spec.Inventory != nil { + for _, obj := range objs { + var ok bool + if ics, ok = obj.(*corev1.Secret); ok { + installConfig, ok := ics.StringData["install-config.yaml"] + if ok { + newInstallConfig, err := r.getCustomizedInstallConfig([]byte(installConfig), cdc, logger) + if err != nil { + r.setInventoryValidCondition(clp, false, fmt.Sprintf("failed to customize with %s", cdc.Name), logger) + cdc.Status.Conditions = controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + corev1.ConditionFalse, + 
"CustomizationFailed", + "Failed to customize install config", + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + return nil, errors.Wrapf(err, "failed to customize install config") + } + ics.StringData["install-config.yaml"] = string(newInstallConfig) + } + } + } + } + // Create the resources. for _, obj := range objs { if err := r.Client.Create(context.Background(), obj.(client.Object)); err != nil { @@ -914,6 +976,35 @@ func (r *ReconcileClusterPool) setAvailableCapacityCondition(pool *hivev1.Cluste return nil } +func (r *ReconcileClusterPool) setInventoryValidCondition(pool *hivev1.ClusterPool, valid bool, msg string, logger log.FieldLogger) error { + status := corev1.ConditionTrue + reason := "Valid" // Maybe a different reason? + message := "Inventory customization successfully applied and reserved" + updateConditionCheck := controllerutils.UpdateConditionNever + if !valid { + status = corev1.ConditionFalse + reason = "Invalid" + message = msg + updateConditionCheck = controllerutils.UpdateConditionIfReasonOrMessageChange + } + conds, changed := controllerutils.SetClusterPoolConditionWithChangeCheck( + pool.Status.Conditions, + hivev1.ClusterPoolInventoryValidCondition, + status, + reason, + message, + updateConditionCheck, + ) + if changed { + pool.Status.Conditions = conds + if err := r.Status().Update(context.Background(), pool); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterPool conditions") + return errors.Wrap(err, "could not update ClusterPool conditions") + } + } + return nil +} + func (r *ReconcileClusterPool) verifyClusterImageSet(pool *hivev1.ClusterPool, logger log.FieldLogger) error { err := r.Get(context.Background(), client.ObjectKey{Name: pool.Spec.ImageSetRef.Name}, &hivev1.ClusterImageSet{}) if err != nil { @@ -1006,9 +1097,131 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg cloudBuilder.Region = platform.Azure.Region cloudBuilder.CloudName = 
platform.Azure.CloudName return cloudBuilder, nil - // TODO: OpenStack, VMware, and Ovirt. + case platform.OpenStack != nil: + credsSecret, err := r.getCredentialsSecret(pool, platform.OpenStack.CredentialsSecretRef.Name, logger) + if err != nil { + return nil, err + } + cloudBuilder := clusterresource.NewOpenStackCloudBuilderFromSecret(credsSecret) + cloudBuilder.Cloud = platform.OpenStack.Cloud + return cloudBuilder, nil + // TODO: VMware, and Ovirt. default: logger.Info("unsupported platform") return nil, errors.New("unsupported platform") } } + +// INFO: [Fairness](https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#fairness) +// The function loops over the list of inventory items and picks the first available customization. +// Failing to apply a customization (in any cluster pool) will cause to change its status to unvailable and a new cluster will be queued. +func (r *ReconcileClusterPool) getInventoryCustomization(pool *hivev1.ClusterPool, logger log.FieldLogger) (*hivev1.ClusterDeploymentCustomization, error) { + cdc := &hivev1.ClusterDeploymentCustomization{} + for _, entry := range pool.Spec.Inventory { + if entry.Kind == hivev1.ClusterDeploymentCustomizationInventoryEntry || entry.Kind == "" { + logger.Debugf("processing customization %s for cluster pool %s", entry.Name, pool.Name) + ref := types.NamespacedName{Namespace: pool.Namespace, Name: entry.Name} + err := r.Get(context.TODO(), ref, cdc) + if err != nil { + if apierrors.IsNotFound(err) { + r.setInventoryValidCondition(pool, false, fmt.Sprintf("customization not found: %s", entry.Name), logger) + } + log.WithError(err).Error("error reading customization") + continue + } + + currentAvailability := controllerutils.FindClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + ) + + // Customization available + if currentAvailability == nil || currentAvailability.Status == corev1.ConditionTrue { 
+ cdc.Status.Conditions = controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + corev1.ConditionFalse, + "processing", + "processing", + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + if err := r.Status().Update(context.Background(), cdc); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") + continue + } + + return cdc, nil + } + } + logger.Debugf("customization %s is invalid", entry.Name) + } + r.setInventoryValidCondition(pool, false, "not enough customizations for cluster size", logger) + return nil, errors.New(fmt.Sprintf("no customization available")) +} + +func (r *ReconcileClusterPool) getCustomizedInstallConfig(installConfig []byte, cdc *hivev1.ClusterDeploymentCustomization, logger log.FieldLogger) ([]byte, error) { + + newInstallConfig, err := applyPatches(cdc.Spec.InstallConfigPatches, installConfig, logger) + if err != nil { + log.WithError(err).Error("faild to apply customization patches") + cdc.Status.Conditions = controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + corev1.ConditionFalse, + "failed", + "failed", + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + if err := r.Status().Update(context.Background(), cdc); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") + } + + return installConfig, err + } + + cdc.Status.Conditions = controllerutils.SetClusterDeploymentCustomizationCondition( + cdc.Status.Conditions, + hivev1.ClusterDeploymentCustomizationAvailableCondition, + corev1.ConditionFalse, + "reserved", + "reserved", + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + if err := r.Status().Update(context.Background(), cdc); err != nil { + 
logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterDeploymentCustomization conditions") + return newInstallConfig, err + } + + return newInstallConfig, nil +} + +func applyPatches(patches []hivev1.PatchEntity, data []byte, logger log.FieldLogger) ([]byte, error) { + // TODO: the error handling might be wrong + targetJson, err := yaml.YAMLToJSON(data) + if err != nil { + log.WithError(err).Error("unable to parse install-config template") + return data, err + } + + patchJson, err := json.Marshal(patches) + if err != nil { + log.WithError(err).Error("unable to marshal patches to json") + return data, err + } + + patch, err := jsonpatch.DecodePatch(patchJson) + if err != nil { + log.WithError(err).Error("unable to create json patch") + return data, err + } + + patchedJson, err := patch.Apply(targetJson) + if err != nil { + log.WithError(err).Error("unable to patch install-config template") + return data, err + } + + patchedYaml, _ := yaml.JSONToYAML(patchedJson) + + return patchedYaml, nil +} diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 1c679e913ac..58f6ebe5b41 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -264,6 +264,18 @@ func isBroken(cd *hivev1.ClusterDeployment, pool *hivev1.ClusterPool, logger log return true } + customizationExists := false + cdcName := cd.Spec.ClusterPoolRef.ClusterDeploymentCustomizationRef.Name + for _, entry := range pool.Spec.Inventory { + if cdcName == entry.Name { + customizationExists = true + } + } + if !customizationExists { + logger.Infof("Cluster %s is broken due to removed customization %s", cd.Name, cdcName) + return true + } + return false } diff --git a/pkg/controller/utils/conditions.go b/pkg/controller/utils/conditions.go index 88e23439a28..66f25d7a362 100644 --- a/pkg/controller/utils/conditions.go +++ b/pkg/controller/utils/conditions.go @@ -522,6 +522,58 @@ func 
// SetClusterDeploymentCustomizationCondition sets a condition on a ClusterDeploymentCustomization
// resource's status and returns the (possibly updated) condition slice. Following the convention
// of the other Set*Condition helpers in this package, a condition that does not exist yet is only
// appended when its status is ConditionTrue; updates to an existing condition are gated by
// updateConditionCheck, and LastTransitionTime is only bumped on an actual status change.
// NOTE(review): the clusterpool controller sets this condition to ConditionFalse
// ("processing"/"reserved") on objects that may not carry the condition yet; under the
// True-only-append rule those writes are silently dropped — confirm this is intended.
func SetClusterDeploymentCustomizationCondition(
	conditions []hivev1.ClusterDeploymentCustomizationCondition,
	conditionType hivev1.ClusterDeploymentCustomizationConditionType,
	status corev1.ConditionStatus,
	reason string,
	message string,
	updateConditionCheck UpdateConditionCheck,
) []hivev1.ClusterDeploymentCustomizationCondition {
	now := metav1.Now()
	existingCondition := FindClusterDeploymentCustomizationCondition(conditions, conditionType)
	if existingCondition == nil {
		// New conditions are only recorded when True (package convention).
		if status == corev1.ConditionTrue {
			conditions = append(
				conditions,
				hivev1.ClusterDeploymentCustomizationCondition{
					Type:               conditionType,
					Status:             status,
					Reason:             reason,
					Message:            message,
					LastTransitionTime: now,
					LastProbeTime:      now,
				},
			)
		}
	} else {
		if shouldUpdateCondition(
			existingCondition.Status, existingCondition.Reason, existingCondition.Message,
			status, reason, message,
			updateConditionCheck,
		) {
			// Only a genuine status flip moves LastTransitionTime; every accepted
			// update refreshes LastProbeTime.
			if existingCondition.Status != status {
				existingCondition.LastTransitionTime = now
			}
			existingCondition.Status = status
			existingCondition.Reason = reason
			existingCondition.Message = message
			existingCondition.LastProbeTime = now
		}
	}
	return conditions
}

// FindClusterDeploymentCustomizationCondition returns a pointer to the condition of the given
// type within conditions, or nil if no such condition exists. The pointer aliases the slice
// element, so mutations through it are visible to the caller.
func FindClusterDeploymentCustomizationCondition(conditions []hivev1.ClusterDeploymentCustomizationCondition, conditionType hivev1.ClusterDeploymentCustomizationConditionType) *hivev1.ClusterDeploymentCustomizationCondition {
	for i, condition := range conditions {
		if condition.Type == conditionType {
			return &conditions[i]
		}
	}
	return nil
}

// SetMachinePoolConditionWithChangeCheck sets a condition on a MachinePool resource's status.
// It returns the conditions as well a boolean indicating whether there was a change made
// to the conditions.
diff --git a/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go new file mode 100644 index 00000000000..bfb7069bd54 --- /dev/null +++ b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go @@ -0,0 +1,285 @@ +package v1 + +import ( + "fmt" + "net/http" + "strings" + + log "github.com/sirupsen/logrus" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/apis/hive/v1" +) + +const ( + clusterDeploymentCustomizationGroup = "hive.openshift.io" + clusterDeploymentCustomizationVersion = "v1" + clusterDeploymentCustomizationResource = "clusterdeploymentcustomization" + + clusterDeploymentCustomizationAdmissionGroup = "admission.hive.openshift.io" + clusterDeploymentCustomizationAdmissionVersion = "v1" +) + +// ClusterDeploymentCustomizationlValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. 
type ClusterDeploymentCustomizationValidatingAdmissionHook struct {
	// decoder decodes the raw admission request objects into hive API types.
	decoder *admission.Decoder
}

// NewClusterDeploymentCustomizationValidatingAdmissionHook constructs a new ClusterDeploymentCustomizationValidatingAdmissionHook
func NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder *admission.Decoder) *ClusterDeploymentCustomizationValidatingAdmissionHook {
	return &ClusterDeploymentCustomizationValidatingAdmissionHook{
		decoder: decoder,
	}
}

// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the
// webhook is accessed by the kube apiserver.
// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterdeploymentcustomizationvalidators".
// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below.
func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) {
	log.WithFields(log.Fields{
		"group":    clusterDeploymentCustomizationAdmissionGroup,
		"version":  clusterDeploymentCustomizationAdmissionVersion,
		"resource": "clusterdeploymentcustomizationvalidator",
	}).Info("Registering validation REST resource")

	// NOTE: This GVR is meant to be different than the ClusterDeploymentCustomization CRD GVR which has group "hive.openshift.io".
	return schema.GroupVersionResource{
			Group:    clusterDeploymentCustomizationAdmissionGroup,
			Version:  clusterDeploymentCustomizationAdmissionVersion,
			Resource: "clusterdeploymentcustomizationvalidators",
		},
		"clusterdeploymentcustomizationvalidator"
}

// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs.
func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error {
	log.WithFields(log.Fields{
		"group":    clusterDeploymentCustomizationAdmissionGroup,
		"version":  clusterDeploymentCustomizationAdmissionVersion,
		"resource": "clusterdeploymentcustomizationvalidator",
	}).Info("Initializing validation REST resource")
	return nil // No initialization needed right now.
}

// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request.
// Usually it's the kube apiserver that is making the admission validation request.
// Requests for resources other than ClusterDeploymentCustomization, and operations other
// than Create/Update, are allowed without inspection.
func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
	contextLogger := log.WithFields(log.Fields{
		"operation": admissionSpec.Operation,
		"group":     admissionSpec.Resource.Group,
		"version":   admissionSpec.Resource.Version,
		"resource":  admissionSpec.Resource.Resource,
		"method":    "Validate",
	})

	if !a.shouldValidate(admissionSpec) {
		contextLogger.Info("Skipping validation for request")
		// The request object isn't something that this validator should validate.
		// Therefore, we say that it's Allowed.
		return &admissionv1beta1.AdmissionResponse{
			Allowed: true,
		}
	}

	contextLogger.Info("Validating request")

	switch admissionSpec.Operation {
	case admissionv1beta1.Create:
		return a.validateCreate(admissionSpec)
	case admissionv1beta1.Update:
		return a.validateUpdate(admissionSpec)
	default:
		// Deletes (and any other operation) carry no spec to validate.
		contextLogger.Info("Successful validation")
		return &admissionv1beta1.AdmissionResponse{
			Allowed: true,
		}
	}
}

// shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check
// the validity of some other type of object with a different GVR.
+func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "shouldValidate", + }) + + if admissionSpec.Resource.Group != clusterDeploymentCustomizationGroup { + contextLogger.Info("Returning False, not our group") + return false + } + + if admissionSpec.Resource.Version != clusterDeploymentCustomizationVersion { + contextLogger.Info("Returning False, it's our group, but not the right version") + return false + } + + if admissionSpec.Resource.Resource != clusterDeploymentCustomizationResource { + contextLogger.Info("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. + contextLogger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreate specifically validates create operations for ClusterDeploymentCustomization objects. 
+func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateCreate", + }) + + cdc := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, cdc); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = cdc.Name + + // TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd) + + if len(cdc.Name) > validation.DNS1123LabelMaxLength { + message := fmt.Sprintf("Invalid cluster deployment customization name (.meta.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength)) + contextLogger.Error(message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateInstallConfigPatches(specPath.Child("installConfigPatches"), cdc.Spec.InstallConfigPatches)...) + + if len(allErrs) > 0 { + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. 
+ contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdate specifically validates update operations for ClusterDeployment objects. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + oldObject := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { + contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["oldObject.Name"] = oldObject.Name + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateInstallConfigPatches(specPath, newObject.Spec.InstallConfigPatches)...) 
+ + if len(allErrs) > 0 { + contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func validateInstallConfigPatches(path *field.Path, patches []hivev1.PatchEntity) field.ErrorList { + allErrs := field.ErrorList{} + + for i, patch := range patches { + if !isValidOP(patch.Op) { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch op must be a valid json patch operation")) + } + if len(patch.Path) == 0 || !strings.HasPrefix(patch.Path, "/") { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch path must start with '/'")) + } + } + return allErrs +} + +func isValidOP(op string) bool { + switch op { + case + "replace", + "add", + "remove", + "test", + "copy", + "move": + return true + } + return false +} diff --git a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go index 1302c8dbad0..47d706fee15 100644 --- a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go @@ -179,6 +179,10 @@ func (a *ClusterPoolValidatingAdmissionHook) validateCreate(admissionSpec *admis allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + if newObject.Spec.Inventory != nil { + allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...) 
+ } + if len(allErrs) > 0 { status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() return &admissionv1beta1.AdmissionResponse{ @@ -239,6 +243,10 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + if newObject.Spec.Inventory != nil { + allErrs = append(allErrs, validateInventory(specPath, newObject.Spec.Inventory)...) + } + if len(allErrs) > 0 { contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() @@ -254,3 +262,11 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis Allowed: true, } } + +func validateInventory(path *field.Path, inventory []hivev1.InventoryEntry) field.ErrorList { + allErrs := field.ErrorList{} + if len(inventory) == 0 { + allErrs = append(allErrs, field.Invalid(path, inventory, "inventory can't be empty")) + } + return allErrs +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go index ff3421b9b3b..265fa5254da 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go @@ -266,6 +266,8 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. 
ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` + // ClusterDeploymentCustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment + ClusterDeploymentCustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization"` } // ClusterMetadata contains metadata information about the installed cluster. diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go new file mode 100644 index 00000000000..ec01b6cffad --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -0,0 +1,90 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API +// +kubebuilder:subresource:status +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Namespaced +type ClusterDeploymentCustomization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterDeploymentCustomizationSpec `json:"spec"` + Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"` +} + +// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationSpec struct { + // TODO: documentation + InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` +} + +// TODO: documentation +type PatchEntity struct { + // +required + Op string `json:"op"` + // +required + Path string `json:"path"` + // +required + Value string `json:"value"` +} + +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationStatus struct 
{ + // TODO: documentation + // +optional + ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"` + + // Conditions includes more detailed status for the cluster deployment customization status. + // +optional + Conditions []ClusterDeploymentCustomizationCondition `json:"conditions,omitempty"` +} + +type ClusterDeploymentCustomizationCondition struct { + // Type is the type of the condition. + Type ClusterDeploymentCustomizationConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterDeploymentCustomizationConditionType is a valid value for ClusterDeploymentCustomizationCondition.Type +type ClusterDeploymentCustomizationConditionType string + +const ( + // TODO: add more types + // TODO: shorter name? 
+ ClusterDeploymentCustomizationAvailableCondition ClusterDeploymentCustomizationConditionType = "Available" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomizationLis contains the list of ClusterDeploymentCustomization +type ClusterDeploymentCustomizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ClusterDeploymentCustomization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{}) +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go index 245492d285d..98941d27e9c 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go @@ -88,6 +88,11 @@ type ClusterPoolSpec struct { // HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool. // +optional HibernationConfig *HibernationConfig `json:"hibernationConfig"` + + // Inventory maintains a list entries consumed by the clusterpool + // to customize the default the cluster deployment + // +optional + Inventory []InventoryEntry `json:"inventory,omitempty"` } type HibernationConfig struct { @@ -99,6 +104,22 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } +// InventoryEntryKind in Kind of the inventory entry +// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization +type InventoryEntryKind string + +const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization" + +// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment +type InventoryEntry struct { + // Kind denotes the kind of the referenced resource. 
The default is ClusterDeploymentCustomization, which is also currently the only supported value. + // +optional + Kind InventoryEntryKind `json:"kind,omitempty"` + // Name is the name of the referenced resource. + // +required + Name string `json:"name,omitempty"` +} + // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool. type ClusterPoolClaimLifetime struct { // Default is the default lifetime of the claim when no lifetime is set on the claim itself. @@ -168,6 +189,8 @@ const ( // ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready) // ClusterDeployments in the pool match the current configuration of the ClusterPool. ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent" + // ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid + ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" ) // +genclient diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index f460058df33..4b21c871601 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -654,6 +654,134 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. 
+func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationCondition) DeepCopyInto(out *ClusterDeploymentCustomizationCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationCondition. +func (in *ClusterDeploymentCustomizationCondition) DeepCopy() *ClusterDeploymentCustomizationCondition { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. 
+func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterDeploymentCustomizationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. 
+func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1309,6 +1437,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.ClusterDeploymentCustomizationRef != nil { + in, out := &in.ClusterDeploymentCustomizationRef, &out.ClusterDeploymentCustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1376,6 +1509,16 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(ClusterPoolClaimLifetime) (*in).DeepCopyInto(*out) } + if in.HibernationConfig != nil { + in, out := &in.HibernationConfig, &out.HibernationConfig + *out = new(HibernationConfig) + **out = **in + } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2234,6 +2377,23 @@ func (in *GCPDNSZoneStatus) DeepCopy() *GCPDNSZoneStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HibernationConfig) DeepCopyInto(out *HibernationConfig) { + *out = *in + out.ResumeTimeout = in.ResumeTimeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationConfig. +func (in *HibernationConfig) DeepCopy() *HibernationConfig { + if in == nil { + return nil + } + out := new(HibernationConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HiveConfig) DeepCopyInto(out *HiveConfig) { *out = *in @@ -2424,6 +2584,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. +func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2903,6 +3079,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. +func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in