From e59435198292e944d0999a18f748b42f2baad3b4 Mon Sep 17 00:00:00 2001 From: Fred Rolland Date: Thu, 29 Apr 2021 10:23:07 +0300 Subject: [PATCH] MGMT-5396 Migrate to AgentClusterInstall CRD - Add new CRD type agentclusterinstall that will be referenced from ClusterDeployment - Migrate controller - Fix unit-tests - Fix subsystem test The following AgentClusterInstall conditions are renamed: - Installed : Completed - ReadyForInstallation: RequirementsMet Signed-off-by: Fred Rolland Co-authored-by: Devan Goodwin dgoodwin@redhat.com --- cmd/main.go | 2 + ...ive.openshift.io_agentclusterinstalls.yaml | 300 ++++++++ config/crd/kustomization.yaml | 1 + config/crd/resources.yaml | 237 +++++++ config/rbac/role.yaml | 20 + ...ervice-operator.clusterserviceversion.yaml | 23 + ...ive.openshift.io_agentclusterinstalls.yaml | 236 ++++++ deploy/olm-catalog/metadata/annotations.yaml | 2 +- go.mod | 3 +- go.sum | 7 +- .../hive.openshift.io_clusterdeployments.yaml | 334 +++------ .../v1beta1/agentclusterinstall_types.go | 180 +++++ .../v1beta1/groupversion_info.go | 41 ++ .../v1beta1/zz_generated.deepcopy.go | 237 +++++++ ...ive.openshift.io_agentclusterinstalls.yaml | 300 ++++++++ .../controller/config/crd/kustomization.yaml | 29 + .../controllers/agent_controller_test.go | 26 +- .../controllers/bmh_agent_controller_test.go | 8 +- .../clusterdeployments_controller.go | 319 +++++---- .../clusterdeployments_controller_test.go | 671 ++++++++++-------- internal/controller/controllers/conditions.go | 34 +- .../controllers/controllers_suite_test.go | 2 + .../controllers/infraenv_controller_test.go | 26 +- subsystem/kubeapi_test.go | 383 ++++++---- subsystem/subsystem_suite_test.go | 5 +- 25 files changed, 2571 insertions(+), 855 deletions(-) create mode 100644 config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml create mode 100644 deploy/olm-catalog/manifests/extensions.hive.openshift.io_agentclusterinstalls.yaml create mode 100644 
internal/controller/api/hiveextension/v1beta1/agentclusterinstall_types.go create mode 100644 internal/controller/api/hiveextension/v1beta1/groupversion_info.go create mode 100644 internal/controller/api/hiveextension/v1beta1/zz_generated.deepcopy.go create mode 100644 internal/controller/config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml create mode 100644 internal/controller/config/crd/kustomization.yaml diff --git a/cmd/main.go b/cmd/main.go index fba9756d27a..8ebc813c2cd 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -23,6 +23,7 @@ import ( "github.com/openshift/assisted-service/internal/cluster/validations" "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/internal/connectivity" + hiveext "github.com/openshift/assisted-service/internal/controller/api/hiveextension/v1beta1" aiv1beta1 "github.com/openshift/assisted-service/internal/controller/api/v1beta1" "github.com/openshift/assisted-service/internal/controller/controllers" "github.com/openshift/assisted-service/internal/dns" @@ -675,6 +676,7 @@ func createControllerManager() (manager.Manager, error) { utilruntime.Must(scheme.AddToScheme(schemes)) utilruntime.Must(aiv1beta1.AddToScheme(schemes)) utilruntime.Must(hivev1.AddToScheme(schemes)) + utilruntime.Must(hiveext.AddToScheme(schemes)) utilruntime.Must(bmh_v1alpha1.AddToScheme(schemes)) return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ diff --git a/config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml b/config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml new file mode 100644 index 00000000000..043db9a73b6 --- /dev/null +++ b/config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml @@ -0,0 +1,300 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + name: agentclusterinstalls.extensions.hive.openshift.io 
+spec: + group: extensions.hive.openshift.io + names: + kind: AgentClusterInstall + listKind: AgentClusterInstallList + plural: agentclusterinstalls + singular: agentclusterinstall + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AgentClusterInstall represents a request to provision an agent + based cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AgentClusterInstallSpec defines the desired state of the + AgentClusterInstall. + properties: + apiVIP: + description: APIVIP is the virtual IP used to reach the OpenShift + cluster's API. + type: string + clusterDeploymentRef: + description: ClusterDeploymentRef is a reference to the ClusterDeployment + associated with this AgentClusterInstall. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterMetadata: + description: ClusterMetadata contains metadata information about the + installed cluster. It should be populated once the cluster install + is completed. (it can be populated sooner if desired, but Hive will + not copy back to ClusterDeployment until the Installed condition + goes True. 
+ properties: + adminKubeconfigSecretRef: + description: AdminKubeconfigSecretRef references the secret containing + the admin kubeconfig for this cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + adminPasswordSecretRef: + description: AdminPasswordSecretRef references the secret containing + the admin username/password which can be used to login to this + cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterID: + description: ClusterID is a globally unique identifier for this + cluster generated during installation. Used for reporting metrics + among other places. + type: string + infraID: + description: InfraID is an identifier for this cluster generated + during installation and used for tagging/naming resources in + cloud providers. + type: string + required: + - adminKubeconfigSecretRef + - adminPasswordSecretRef + - clusterID + - infraID + type: object + compute: + description: Compute is the configuration for the machines that comprise + the compute nodes. + items: + description: AgentMachinePool is a pool of machines to be installed. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading + that machines in the pool will utilize. Default is for hyperthreading + to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + type: array + controlPlane: + description: ControlPlane is the configuration for the machines that + comprise the control plane. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading + that machines in the pool will utilize. 
Default is for hyperthreading + to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + imageSetRef: + description: ImageSetRef is a reference to a ClusterImageSet. The + release image specified in the ClusterImageSet will be used to install + the cluster. + properties: + name: + description: Name is the name of the ClusterImageSet that this + refers to + type: string + required: + - name + type: object + ingressVIP: + description: IngressVIP is the virtual IP used for cluster ingress + traffic. + type: string + manifestsConfigMapRef: + description: ManifestsConfigMapRef is a reference to user-provided + manifests to add to or replace manifests that are generated by the + installer. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + networking: + description: Networking is the configuration for the pod network provider + in the cluster. + properties: + clusterNetwork: + description: ClusterNetwork is the list of IP address pools for + pods. Default is 10.128.0.0/14 and a host prefix of /23. + items: + description: ClusterNetworkEntry is a single IP address block + for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength. + properties: + cidr: + description: CIDR is the IP block address pool. + type: string + hostPrefix: + description: HostPrefix is the prefix size to allocate to + each node from the CIDR. For example, 24 would allocate + 2^8=256 adresses to each node. If this field is not used + by the plugin, it can be left unset. + format: int32 + type: integer + required: + - cidr + type: object + type: array + machineNetwork: + description: MachineNetwork is the list of IP address pools for + machines. + items: + description: MachineNetworkEntry is a single IP address block + for node IP blocks. 
+ properties: + cidr: + description: CIDR is the IP block address pool for machines + within the cluster. + type: string + required: + - cidr + type: object + type: array + serviceNetwork: + description: 'ServiceNetwork is the list of IP address pools for + services. Default is 172.30.0.0/16. NOTE: currently only one + entry is supported.' + items: + type: string + maxItems: 1 + type: array + type: object + provisionRequirements: + description: ProvisionRequirements defines configuration for when + the installation is ready to be launched automatically. + properties: + controlPlaneAgents: + description: ControlPlaneAgents is the number of matching approved + and ready Agents with the control plane role required to launch + the install. Must be either 1 or 3. + type: integer + workerAgents: + description: WorkerAgents is the minimum number of matching approved + and ready Agents with the worker role required to launch the + install. + minimum: 0 + type: integer + required: + - controlPlaneAgents + type: object + sshPublicKey: + description: SSHPublicKey will be added to all cluster hosts for use + in debugging. + type: string + required: + - clusterDeploymentRef + - imageSetRef + - networking + - provisionRequirements + type: object + status: + description: AgentClusterInstallStatus defines the observed state of the + AgentClusterInstall. + properties: + conditions: + description: Conditions includes more detailed status for the cluster + install. + items: + description: ClusterInstallCondition contains details for the current + condition of a cluster install. + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating + details about last transition. 
+ type: string + reason: + description: Reason is a unique, one-word, CamelCase reason + for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + connectivityMajorityGroups: + type: string + controlPlaneAgentsDiscovered: + description: ControlPlaneAgentsDiscovered is the number of Agents + currently linked to this ClusterDeployment. + type: integer + controlPlaneAgentsReady: + description: ControlPlaneAgentsDiscovered is the number of Agents + currently linked to this ClusterDeployment that are ready for use. + type: integer + workerAgentsDiscovered: + description: WorkerAgentsDiscovered is the number of worker Agents + currently linked to this ClusterDeployment. + type: integer + workerAgentsReady: + description: WorkerAgentsDiscovered is the number of worker Agents + currently linked to this ClusterDeployment that are ready for use. 
+ type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 137b60666f7..767289996ab 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -6,6 +6,7 @@ resources: - bases/agent-install.openshift.io_infraenvs.yaml - bases/agent-install.openshift.io_agents.yaml - bases/agent-install.openshift.io_nmstateconfigs.yaml +- bases/extensions.hive.openshift.io_agentclusterinstalls.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: diff --git a/config/crd/resources.yaml b/config/crd/resources.yaml index b3e1971abfa..f92914a65b9 100644 --- a/config/crd/resources.yaml +++ b/config/crd/resources.yaml @@ -1,5 +1,242 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + name: agentclusterinstalls.extensions.hive.openshift.io +spec: + group: extensions.hive.openshift.io + names: + kind: AgentClusterInstall + listKind: AgentClusterInstallList + plural: agentclusterinstalls + singular: agentclusterinstall + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AgentClusterInstall represents a request to provision an agent based cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AgentClusterInstallSpec defines the desired state of the AgentClusterInstall. + properties: + apiVIP: + description: APIVIP is the virtual IP used to reach the OpenShift cluster's API. + type: string + clusterDeploymentRef: + description: ClusterDeploymentRef is a reference to the ClusterDeployment associated with this AgentClusterInstall. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterMetadata: + description: ClusterMetadata contains metadata information about the installed cluster. It should be populated once the cluster install is completed. (it can be populated sooner if desired, but Hive will not copy back to ClusterDeployment until the Installed condition goes True. + properties: + adminKubeconfigSecretRef: + description: AdminKubeconfigSecretRef references the secret containing the admin kubeconfig for this cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + adminPasswordSecretRef: + description: AdminPasswordSecretRef references the secret containing the admin username/password which can be used to login to this cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + clusterID: + description: ClusterID is a globally unique identifier for this cluster generated during installation. Used for reporting metrics among other places. + type: string + infraID: + description: InfraID is an identifier for this cluster generated during installation and used for tagging/naming resources in cloud providers. + type: string + required: + - adminKubeconfigSecretRef + - adminPasswordSecretRef + - clusterID + - infraID + type: object + compute: + description: Compute is the configuration for the machines that comprise the compute nodes. + items: + description: AgentMachinePool is a pool of machines to be installed. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading that machines in the pool will utilize. Default is for hyperthreading to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + type: array + controlPlane: + description: ControlPlane is the configuration for the machines that comprise the control plane. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading that machines in the pool will utilize. Default is for hyperthreading to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + imageSetRef: + description: ImageSetRef is a reference to a ClusterImageSet. The release image specified in the ClusterImageSet will be used to install the cluster. + properties: + name: + description: Name is the name of the ClusterImageSet that this refers to + type: string + required: + - name + type: object + ingressVIP: + description: IngressVIP is the virtual IP used for cluster ingress traffic. + type: string + manifestsConfigMapRef: + description: ManifestsConfigMapRef is a reference to user-provided manifests to add to or replace manifests that are generated by the installer. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + networking: + description: Networking is the configuration for the pod network provider in the cluster. + properties: + clusterNetwork: + description: ClusterNetwork is the list of IP address pools for pods. Default is 10.128.0.0/14 and a host prefix of /23. + items: + description: ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength. + properties: + cidr: + description: CIDR is the IP block address pool. + type: string + hostPrefix: + description: HostPrefix is the prefix size to allocate to each node from the CIDR. For example, 24 would allocate 2^8=256 adresses to each node. If this field is not used by the plugin, it can be left unset. + format: int32 + type: integer + required: + - cidr + type: object + type: array + machineNetwork: + description: MachineNetwork is the list of IP address pools for machines. + items: + description: MachineNetworkEntry is a single IP address block for node IP blocks. + properties: + cidr: + description: CIDR is the IP block address pool for machines within the cluster. + type: string + required: + - cidr + type: object + type: array + serviceNetwork: + description: 'ServiceNetwork is the list of IP address pools for services. Default is 172.30.0.0/16. NOTE: currently only one entry is supported.' + items: + type: string + maxItems: 1 + type: array + type: object + provisionRequirements: + description: ProvisionRequirements defines configuration for when the installation is ready to be launched automatically. + properties: + controlPlaneAgents: + description: ControlPlaneAgents is the number of matching approved and ready Agents with the control plane role required to launch the install. Must be either 1 or 3. 
+ type: integer + workerAgents: + description: WorkerAgents is the minimum number of matching approved and ready Agents with the worker role required to launch the install. + minimum: 0 + type: integer + required: + - controlPlaneAgents + type: object + sshPublicKey: + description: SSHPublicKey will be added to all cluster hosts for use in debugging. + type: string + required: + - clusterDeploymentRef + - imageSetRef + - networking + - provisionRequirements + type: object + status: + description: AgentClusterInstallStatus defines the observed state of the AgentClusterInstall. + properties: + conditions: + description: Conditions includes more detailed status for the cluster install. + items: + description: ClusterInstallCondition contains details for the current condition of a cluster install. + properties: + lastProbeTime: + description: LastProbeTime is the last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: LastTransitionTime is the last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Message is a human-readable message indicating details about last transition. + type: string + reason: + description: Reason is a unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + connectivityMajorityGroups: + type: string + controlPlaneAgentsDiscovered: + description: ControlPlaneAgentsDiscovered is the number of Agents currently linked to this ClusterDeployment. + type: integer + controlPlaneAgentsReady: + description: ControlPlaneAgentsDiscovered is the number of Agents currently linked to this ClusterDeployment that are ready for use. 
+ type: integer + workerAgentsDiscovered: + description: WorkerAgentsDiscovered is the number of worker Agents currently linked to this ClusterDeployment. + type: integer + workerAgentsReady: + description: WorkerAgentsDiscovered is the number of worker Agents currently linked to this ClusterDeployment that are ready for use. + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.4.0 diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 86591c1a809..ca1e6e085e0 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -153,6 +153,26 @@ rules: - patch - update - watch +- apiGroups: + - extensions.hive.openshift.io + resources: + - agentclusterinstalls + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - extensions.hive.openshift.io + resources: + - agentclusterinstalls/status + verbs: + - get + - patch + - update - apiGroups: - hive.openshift.io resources: diff --git a/deploy/olm-catalog/manifests/assisted-service-operator.clusterserviceversion.yaml b/deploy/olm-catalog/manifests/assisted-service-operator.clusterserviceversion.yaml index e95fcb25fdc..18e39cf19e7 100644 --- a/deploy/olm-catalog/manifests/assisted-service-operator.clusterserviceversion.yaml +++ b/deploy/olm-catalog/manifests/assisted-service-operator.clusterserviceversion.yaml @@ -48,6 +48,9 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - kind: AgentClusterInstall + name: agentclusterinstalls.extensions.hive.openshift.io + version: v1beta1 - displayName: Agent kind: Agent name: agents.agent-install.openshift.io @@ -290,6 +293,26 @@ spec: - patch - update - watch + - apiGroups: + - extensions.hive.openshift.io + resources: 
+ - agentclusterinstalls + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - extensions.hive.openshift.io + resources: + - agentclusterinstalls/status + verbs: + - get + - patch + - update - apiGroups: - hive.openshift.io resources: diff --git a/deploy/olm-catalog/manifests/extensions.hive.openshift.io_agentclusterinstalls.yaml b/deploy/olm-catalog/manifests/extensions.hive.openshift.io_agentclusterinstalls.yaml new file mode 100644 index 00000000000..7b2a47b77d7 --- /dev/null +++ b/deploy/olm-catalog/manifests/extensions.hive.openshift.io_agentclusterinstalls.yaml @@ -0,0 +1,236 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + name: agentclusterinstalls.extensions.hive.openshift.io +spec: + group: extensions.hive.openshift.io + names: + kind: AgentClusterInstall + listKind: AgentClusterInstallList + plural: agentclusterinstalls + singular: agentclusterinstall + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AgentClusterInstall represents a request to provision an agent based cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AgentClusterInstallSpec defines the desired state of the AgentClusterInstall. + properties: + apiVIP: + description: APIVIP is the virtual IP used to reach the OpenShift cluster's API. + type: string + clusterDeploymentRef: + description: ClusterDeploymentRef is a reference to the ClusterDeployment associated with this AgentClusterInstall. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterMetadata: + description: ClusterMetadata contains metadata information about the installed cluster. It should be populated once the cluster install is completed. (it can be populated sooner if desired, but Hive will not copy back to ClusterDeployment until the Installed condition goes True. + properties: + adminKubeconfigSecretRef: + description: AdminKubeconfigSecretRef references the secret containing the admin kubeconfig for this cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + adminPasswordSecretRef: + description: AdminPasswordSecretRef references the secret containing the admin username/password which can be used to login to this cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterID: + description: ClusterID is a globally unique identifier for this cluster generated during installation. 
Used for reporting metrics among other places. + type: string + infraID: + description: InfraID is an identifier for this cluster generated during installation and used for tagging/naming resources in cloud providers. + type: string + required: + - adminKubeconfigSecretRef + - adminPasswordSecretRef + - clusterID + - infraID + type: object + compute: + description: Compute is the configuration for the machines that comprise the compute nodes. + items: + description: AgentMachinePool is a pool of machines to be installed. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading that machines in the pool will utilize. Default is for hyperthreading to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + type: array + controlPlane: + description: ControlPlane is the configuration for the machines that comprise the control plane. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading that machines in the pool will utilize. Default is for hyperthreading to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + imageSetRef: + description: ImageSetRef is a reference to a ClusterImageSet. The release image specified in the ClusterImageSet will be used to install the cluster. + properties: + name: + description: Name is the name of the ClusterImageSet that this refers to + type: string + required: + - name + type: object + ingressVIP: + description: IngressVIP is the virtual IP used for cluster ingress traffic. + type: string + manifestsConfigMapRef: + description: ManifestsConfigMapRef is a reference to user-provided manifests to add to or replace manifests that are generated by the installer. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + networking: + description: Networking is the configuration for the pod network provider in the cluster. + properties: + clusterNetwork: + description: ClusterNetwork is the list of IP address pools for pods. Default is 10.128.0.0/14 and a host prefix of /23. + items: + description: ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength. + properties: + cidr: + description: CIDR is the IP block address pool. + type: string + hostPrefix: + description: HostPrefix is the prefix size to allocate to each node from the CIDR. For example, 24 would allocate 2^8=256 adresses to each node. If this field is not used by the plugin, it can be left unset. + format: int32 + type: integer + required: + - cidr + type: object + type: array + machineNetwork: + description: MachineNetwork is the list of IP address pools for machines. + items: + description: MachineNetworkEntry is a single IP address block for node IP blocks. + properties: + cidr: + description: CIDR is the IP block address pool for machines within the cluster. + type: string + required: + - cidr + type: object + type: array + serviceNetwork: + description: 'ServiceNetwork is the list of IP address pools for services. Default is 172.30.0.0/16. NOTE: currently only one entry is supported.' + items: + type: string + maxItems: 1 + type: array + type: object + provisionRequirements: + description: ProvisionRequirements defines configuration for when the installation is ready to be launched automatically. + properties: + controlPlaneAgents: + description: ControlPlaneAgents is the number of matching approved and ready Agents with the control plane role required to launch the install. Must be either 1 or 3. + type: integer + workerAgents: + description: WorkerAgents is the minimum number of matching approved and ready Agents with the worker role required to launch the install. 
+ minimum: 0
+ type: integer
+ required:
+ - controlPlaneAgents
+ type: object
+ sshPublicKey:
+ description: SSHPublicKey will be added to all cluster hosts for use in debugging.
+ type: string
+ required:
+ - clusterDeploymentRef
+ - imageSetRef
+ - networking
+ - provisionRequirements
+ type: object
+ status:
+ description: AgentClusterInstallStatus defines the observed state of the AgentClusterInstall.
+ properties:
+ conditions:
+ description: Conditions includes more detailed status for the cluster install.
+ items:
+ description: ClusterInstallCondition contains details for the current condition of a cluster install.
+ properties:
+ lastProbeTime:
+ description: LastProbeTime is the last time we probed the condition.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: LastTransitionTime is the last time the condition transitioned from one status to another.
+ format: date-time
+ type: string
+ message:
+ description: Message is a human-readable message indicating details about last transition.
+ type: string
+ reason:
+ description: Reason is a unique, one-word, CamelCase reason for the condition's last transition.
+ type: string
+ status:
+ description: Status is the status of the condition.
+ type: string
+ type:
+ description: Type is the type of the condition.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ connectivityMajorityGroups:
+ type: string
+ controlPlaneAgentsDiscovered:
+ description: ControlPlaneAgentsDiscovered is the number of Agents currently linked to this ClusterDeployment.
+ type: integer
+ controlPlaneAgentsReady:
+ description: ControlPlaneAgentsReady is the number of Agents currently linked to this ClusterDeployment that are ready for use.
+ type: integer
+ workerAgentsDiscovered:
+ description: WorkerAgentsDiscovered is the number of worker Agents currently linked to this ClusterDeployment. 
+ type: integer
+ workerAgentsReady:
+ description: WorkerAgentsReady is the number of worker Agents currently linked to this ClusterDeployment that are ready for use.
+ type: integer
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: [] diff --git a/deploy/olm-catalog/metadata/annotations.yaml b/deploy/olm-catalog/metadata/annotations.yaml index 29eb53084ab..0d0bed732b7 100644 --- a/deploy/olm-catalog/metadata/annotations.yaml +++ b/deploy/olm-catalog/metadata/annotations.yaml @@ -6,9 +6,9 @@ annotations: operators.operatorframework.io.bundle.package.v1: assisted-service-operator operators.operatorframework.io.bundle.channels.v1: alpha,ocm-2.3 operators.operatorframework.io.bundle.channel.default.v1: alpha - operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 operators.operatorframework.io.metrics.builder: operator-sdk-v1.6.1+git operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 # Annotations for testing. 
operators.operatorframework.io.test.mediatype.v1: scorecard+v1 diff --git a/go.mod b/go.mod index c61f27f23da..fe17c462b4d 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible github.com/openshift/client-go v0.0.0-20201020074620-f8fd44879f7c github.com/openshift/custom-resource-status v1.1.0 - github.com/openshift/hive/apis v0.0.0-20210415080537-ea6f0a2dd76c + github.com/openshift/hive/apis v0.0.0-20210506000654-5c038fb05190 github.com/openshift/machine-api-operator v0.2.1-0.20201002104344-6abfb5440597 github.com/ory/dockertest/v3 v3.6.3 github.com/patrickmn/go-cache v2.1.0+incompatible @@ -75,7 +75,6 @@ require ( k8s.io/utils v0.0.0-20201110183641-67b214c5f920 sigs.k8s.io/controller-runtime v0.7.2 sigs.k8s.io/yaml v1.2.0 - ) replace ( diff --git a/go.sum b/go.sum index 8efd45731ec..d4fde632e68 100644 --- a/go.sum +++ b/go.sum @@ -321,6 +321,8 @@ github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5 github.com/denisenkom/go-mssqldb v0.0.0-20181014144952-4e0d7dc8888f/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM= github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgoodwin/hive/apis v0.0.0-20210426140401-97292266a297 h1:sNV683xs4A3DBQRdInwevaO6VUR5debstXnhQAcuEBs= +github.com/dgoodwin/hive/apis v0.0.0-20210426140401-97292266a297/go.mod h1:Ujw9ImzSYvo9VlUX6Gjy7zPFP7xYUAU50tdf1wPpN6c= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4= @@ -1195,8 +1197,9 @@ github.com/openshift/cluster-api-provider-gcp 
v0.0.1-0.20201002065957-9854f74205 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570/go.mod h1:7NRECVE26rvP1/fs1CbhfY5gsgnnFQNhb9txTFzWmUw= github.com/openshift/custom-resource-status v1.1.0 h1:EjSh0f3vF6eaS3zAToVHUXcS7N2jVEosUFJ0sRKvmZ0= github.com/openshift/custom-resource-status v1.1.0/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= -github.com/openshift/hive/apis v0.0.0-20210415080537-ea6f0a2dd76c h1:wRnkZPHBkVmzLk3c6yPMWeTQUS8jsWAE1PY5EutiDdY= -github.com/openshift/hive/apis v0.0.0-20210415080537-ea6f0a2dd76c/go.mod h1:Ujw9ImzSYvo9VlUX6Gjy7zPFP7xYUAU50tdf1wPpN6c= +github.com/openshift/hive/apis v0.0.0-20210426191512-e906999ec6ed/go.mod h1:Ujw9ImzSYvo9VlUX6Gjy7zPFP7xYUAU50tdf1wPpN6c= +github.com/openshift/hive/apis v0.0.0-20210506000654-5c038fb05190 h1:8eShHtqtKwgJWdJh/H0ytAsAblDCA1bAjvokIfuU2jM= +github.com/openshift/hive/apis v0.0.0-20210506000654-5c038fb05190/go.mod h1:Ujw9ImzSYvo9VlUX6Gjy7zPFP7xYUAU50tdf1wPpN6c= github.com/openshift/library-go v0.0.0-20200512120242-21a1ff978534/go.mod h1:2kWwXTkpoQJUN3jZ3QW88EIY1hdRMqxgRs2hheEW/pg= github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916 h1:H9XwZlu78Pn87KNBGGjZY0L2fLOoPnzEajigVur5ZOY= github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= diff --git a/hack/crds/hive.openshift.io_clusterdeployments.yaml b/hack/crds/hive.openshift.io_clusterdeployments.yaml index da4954dda29..1bd825fa27b 100644 --- a/hack/crds/hive.openshift.io_clusterdeployments.yaml +++ b/hack/crds/hive.openshift.io_clusterdeployments.yaml @@ -5,37 +5,37 @@ metadata: name: clusterdeployments.hive.openshift.io spec: additionalPrinterColumns: - - JSONPath: .metadata.labels.hive\.openshift\.io/cluster-platform - name: Platform - type: string - - JSONPath: .metadata.labels.hive\.openshift\.io/cluster-region - name: Region - type: string - - JSONPath: .metadata.labels.hive\.openshift\.io/cluster-type - name: ClusterType - type: 
string - - JSONPath: .spec.installed - name: Installed - type: boolean - - JSONPath: .spec.clusterMetadata.infraID - name: InfraID - type: string - - JSONPath: .metadata.labels.hive\.openshift\.io/version-major-minor-patch - name: Version - type: string - - JSONPath: .status.conditions[?(@.type=='Hibernating')].reason - name: PowerState - type: string - - JSONPath: .metadata.creationTimestamp - name: Age - type: date + - JSONPath: .metadata.labels.hive\.openshift\.io/cluster-platform + name: Platform + type: string + - JSONPath: .metadata.labels.hive\.openshift\.io/cluster-region + name: Region + type: string + - JSONPath: .metadata.labels.hive\.openshift\.io/cluster-type + name: ClusterType + type: string + - JSONPath: .spec.installed + name: Installed + type: boolean + - JSONPath: .spec.clusterMetadata.infraID + name: InfraID + type: string + - JSONPath: .metadata.labels.hive\.openshift\.io/version-major-minor-patch + name: Version + type: string + - JSONPath: .status.conditions[?(@.type=='Hibernating')].reason + name: PowerState + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date group: hive.openshift.io names: kind: ClusterDeployment listKind: ClusterDeploymentList plural: clusterdeployments shortNames: - - cd + - cd singular: clusterdeployment scope: Namespaced subresources: @@ -104,10 +104,30 @@ spec: plane serving certs type: string required: - - certificateSecretRef - - name + - certificateSecretRef + - name type: object type: array + clusterInstallRef: + description: ClusterInstallLocalReference provides reference to an object + that implements the hivecontract ClusterInstall. The namespace of + the object is same as the ClusterDeployment. This cannot be set when + Provisioning is also set. 
+ properties: + group: + type: string + kind: + type: string + name: + type: string + version: + type: string + required: + - group + - kind + - name + - version + type: object clusterMetadata: description: ClusterMetadata contains metadata information about the installed cluster. @@ -142,10 +162,10 @@ spec: providers. type: string required: - - adminKubeconfigSecretRef - - adminPasswordSecretRef - - clusterID - - infraID + - adminKubeconfigSecretRef + - adminPasswordSecretRef + - clusterID + - infraID type: object clusterName: description: ClusterName is the friendly name of the cluster. It is @@ -168,8 +188,8 @@ spec: cluster was created. type: string required: - - namespace - - poolName + - namespace + - poolName type: object controlPlaneConfig: description: ControlPlaneConfig contains additional configuration for @@ -208,8 +228,8 @@ spec: additional certificate. type: string required: - - domain - - name + - domain + - name type: object type: array default: @@ -271,8 +291,8 @@ spec: type: string type: array required: - - key - - operator + - key + - operator type: object type: array matchLabels: @@ -316,8 +336,8 @@ spec: type: string type: array required: - - key - - operator + - key + - operator type: object type: array matchLabels: @@ -335,8 +355,8 @@ spec: in the ClusterDeployment.Spec that should be used for this Ingress type: string required: - - domain - - name + - domain + - name type: object type: array installAttemptsLimit: @@ -375,8 +395,7 @@ spec: properties: agentBareMetal: description: AgentBareMetal is the configuration used when performing - an Assisted Agent based installation to bare metal. Can only be - used with the Assisted InstallStrategy. + an Assisted Agent based installation to bare metal. 
properties: agentSelector: description: AgentSelector is a label selector used for associating @@ -410,8 +429,8 @@ spec: type: string type: array required: - - key - - operator + - key + - operator type: object type: array matchLabels: @@ -424,24 +443,27 @@ spec: only "value". The requirements are ANDed. type: object type: object - apiVIP: - description: APIVIP is the virtual IP used to reach the OpenShift - cluster's API. - type: string - apiVIPDNSName: - description: APIVIPDNSName is the domain name used to reach - the OpenShift cluster API. - type: string - ingressVIP: - description: IngressVIP is the virtual IP used for cluster ingress - traffic. - type: string required: - - agentSelector + - agentSelector type: object aws: description: AWS is the configuration used when installing on AWS. properties: + credentialsAssumeRole: + description: CredentialsAssumeRole refers to the IAM role that + must be assumed to obtain AWS account access for the cluster + operations. + properties: + externalID: + description: 'ExternalID is random string generated by platform + so that assume role is protected from confused deputy + problem. more info: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html' + type: string + roleARN: + type: string + required: + - roleARN + type: object credentialsSecretRef: description: CredentialsSecretRef refers to a secret that contains the AWS account access credentials. @@ -461,7 +483,7 @@ spec: enabled: type: boolean required: - - enabled + - enabled type: object region: description: Region specifies the AWS region where the cluster @@ -474,8 +496,7 @@ spec: created for the cluster. type: object required: - - credentialsSecretRef - - region + - region type: object azure: description: Azure is the configuration used when installing on @@ -499,8 +520,8 @@ spec: will be created. 
type: string required: - - credentialsSecretRef - - region + - credentialsSecretRef + - region type: object baremetal: description: BareMetal is the configuration used when installing @@ -519,7 +540,7 @@ spec: type: string type: object required: - - libvirtSSHPrivateKeySecretRef + - libvirtSSHPrivateKeySecretRef type: object gcp: description: GCP is the configuration used when installing on Google @@ -539,8 +560,8 @@ spec: will be created. type: string required: - - credentialsSecretRef - - region + - credentialsSecretRef + - region type: object openstack: description: OpenStack is the configuration used when installing @@ -584,8 +605,8 @@ spec: ports in your OpenShift cluster. type: boolean required: - - cloud - - credentialsSecretRef + - cloud + - credentialsSecretRef type: object ovirt: description: Ovirt is the configuration used when installing on @@ -624,10 +645,10 @@ spec: would be created. type: string required: - - certificatesSecretRef - - credentialsSecretRef - - ovirt_cluster_id - - storage_domain_id + - certificatesSecretRef + - credentialsSecretRef + - ovirt_cluster_id + - storage_domain_id type: object vsphere: description: VSphere is the configuration used when installing on @@ -678,20 +699,20 @@ spec: vCenter. type: string required: - - certificatesSecretRef - - credentialsSecretRef - - datacenter - - defaultDatastore - - vCenter + - certificatesSecretRef + - credentialsSecretRef + - datacenter + - defaultDatastore + - vCenter type: object type: object powerState: description: PowerState indicates whether a cluster should be running or hibernating. When omitted, PowerState defaults to the Running state. 
enum: - - "" - - Running - - Hibernating + - "" + - Running + - Hibernating type: string preserveOnDelete: description: PreserveOnDelete allows the user to disconnect a cluster @@ -711,7 +732,7 @@ spec: refers to type: string required: - - name + - name type: object installConfigSecretRef: description: InstallConfigSecretRef is the reference to a secret @@ -725,95 +746,6 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object - installStrategy: - description: InstallStrategy provides platform agnostic configuration - for the use of alternate install strategies. Defaults to openshift-install - if none specified. - properties: - agent: - description: Agent is the install strategy configuration for - provisioning a cluster with the Agent based assisted installer. - properties: - networking: - description: Networking is the configuration for the pod - network provider in the cluster. - properties: - clusterNetwork: - description: ClusterNetwork is the list of IP address - pools for pods. Default is 10.128.0.0/14 and a host - prefix of /23. - items: - description: ClusterNetworkEntry is a single IP address - block for pod IP blocks. IP blocks are allocated - with size 2^HostSubnetLength. - properties: - cidr: - description: CIDR is the IP block address pool. - type: string - hostPrefix: - description: HostPrefix is the prefix size to - allocate to each node from the CIDR. For example, - 24 would allocate 2^8=256 adresses to each node. - If this field is not used by the plugin, it - can be left unset. - format: int32 - type: integer - required: - - cidr - type: object - type: array - machineNetwork: - description: MachineNetwork is the list of IP address - pools for machines. - items: - description: MachineNetworkEntry is a single IP address - block for node IP blocks. - properties: - cidr: - description: CIDR is the IP block address pool - for machines within the cluster. 
- type: string - required: - - cidr - type: object - type: array - serviceNetwork: - description: 'ServiceNetwork is the list of IP address - pools for services. Default is 172.30.0.0/16. NOTE: - currently only one entry is supported.' - items: - type: string - maxItems: 1 - type: array - type: object - provisionRequirements: - description: ProvisionRequirements defines configuration - for when the installation is ready to be launched automatically. - properties: - controlPlaneAgents: - description: ControlPlaneAgents is the number of matching - approved and ready Agents with the control plane role - required to launch the install. Must be either 1 or - 3. - type: integer - workerAgents: - description: WorkerAgents is the minimum number of matching - approved and ready Agents with the worker role required - to launch the install. - minimum: 0 - type: integer - required: - - controlPlaneAgents - type: object - sshPublicKey: - description: SSHPublicKey will be added to all cluster hosts - for use in debugging. - type: string - required: - - networking - - provisionRequirements - type: object - type: object installerEnv: description: InstallerEnv are extra environment variables to pass through to the installer. This may be used to enable additional @@ -855,7 +787,7 @@ spec: key must be defined type: boolean required: - - key + - key type: object fieldRef: description: 'Selects a field of the pod: supports metadata.name, @@ -872,7 +804,7 @@ spec: API version. 
type: string required: - - fieldPath + - fieldPath type: object resourceFieldRef: description: 'Selects a resource of the container: only @@ -892,7 +824,7 @@ spec: description: 'Required: resource to select' type: string required: - - resource + - resource type: object secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -911,11 +843,11 @@ spec: must be defined type: boolean required: - - key + - key type: object type: object required: - - name + - name type: object type: array manifestsConfigMapRef: @@ -968,9 +900,9 @@ spec: type: string type: object required: - - baseDomain - - clusterName - - platform + - baseDomain + - clusterName + - platform type: object status: description: ClusterDeploymentStatus defines the observed state of ClusterDeployment @@ -993,8 +925,8 @@ spec: description: Name of the certificate bundle type: string required: - - generated - - name + - generated + - name type: object type: array cliImage: @@ -1032,8 +964,8 @@ spec: description: Type is the type of the condition. type: string required: - - status - - type + - status + - type type: object type: array installRestarts: @@ -1045,36 +977,6 @@ spec: were met and cluster installation was launched. format: date-time type: string - installStrategy: - description: InstallStrategy contains observed state from specific install - strategies. - properties: - agent: - description: Agent defines the observed state of the Agent install - strategy for this cluster. - properties: - connectivityMajorityGroups: - type: string - controlPlaneAgentsDiscovered: - description: ControlPlaneAgentsDiscovered is the number of Agents - currently linked to this ClusterDeployment. - type: integer - controlPlaneAgentsReady: - description: ControlPlaneAgentsDiscovered is the number of Agents - currently linked to this ClusterDeployment that are ready - for use. 
- type: integer - workerAgentsDiscovered: - description: WorkerAgentsDiscovered is the number of worker - Agents currently linked to this ClusterDeployment. - type: integer - workerAgentsReady: - description: WorkerAgentsDiscovered is the number of worker - Agents currently linked to this ClusterDeployment that are - ready for use. - type: integer - type: object - type: object installVersion: description: InstallVersion is the version of OpenShift as reported by the release image resolved for the installation. @@ -1129,12 +1031,12 @@ spec: type: object version: v1 versions: - - name: v1 - served: true - storage: true + - name: v1 + served: true + storage: true status: acceptedNames: kind: "" plural: "" conditions: [] - storedVersions: [] \ No newline at end of file + storedVersions: [] diff --git a/internal/controller/api/hiveextension/v1beta1/agentclusterinstall_types.go b/internal/controller/api/hiveextension/v1beta1/agentclusterinstall_types.go new file mode 100644 index 00000000000..2978a5adda8 --- /dev/null +++ b/internal/controller/api/hiveextension/v1beta1/agentclusterinstall_types.go @@ -0,0 +1,180 @@ +package v1beta1 + +import ( + hivev1 "github.com/openshift/hive/apis/hive/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AgentClusterInstall represents a request to provision an agent based cluster. +// +// +k8s:openapi-gen=true +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type AgentClusterInstall struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AgentClusterInstallSpec `json:"spec"` + Status AgentClusterInstallStatus `json:"status,omitempty"` +} + +// AgentClusterInstallSpec defines the desired state of the AgentClusterInstall. +type AgentClusterInstallSpec struct { + + // ImageSetRef is a reference to a ClusterImageSet. 
The release image specified in the ClusterImageSet will be used + // to install the cluster. + ImageSetRef hivev1.ClusterImageSetReference `json:"imageSetRef"` + + // ClusterDeploymentRef is a reference to the ClusterDeployment associated with this AgentClusterInstall. + ClusterDeploymentRef corev1.LocalObjectReference `json:"clusterDeploymentRef"` + + // ClusterMetadata contains metadata information about the installed cluster. It should be populated once the cluster install is completed. (it can be populated sooner if desired, but Hive will not copy back to ClusterDeployment until the Installed condition goes True. + ClusterMetadata *hivev1.ClusterMetadata `json:"clusterMetadata,omitempty"` + + // ManifestsConfigMapRef is a reference to user-provided manifests to + // add to or replace manifests that are generated by the installer. + ManifestsConfigMapRef *corev1.LocalObjectReference `json:"manifestsConfigMapRef,omitempty"` + + // Networking is the configuration for the pod network provider in + // the cluster. + Networking Networking `json:"networking"` + + // SSHPublicKey will be added to all cluster hosts for use in debugging. + // +optional + SSHPublicKey string `json:"sshPublicKey,omitempty"` + + // ProvisionRequirements defines configuration for when the installation is ready to be launched automatically. + ProvisionRequirements ProvisionRequirements `json:"provisionRequirements"` + + // ControlPlane is the configuration for the machines that comprise the + // control plane. + // +optional + ControlPlane *AgentMachinePool `json:"controlPlane,omitempty"` + + // Compute is the configuration for the machines that comprise the + // compute nodes. + // +optional + Compute []AgentMachinePool `json:"compute,omitempty"` + + // APIVIP is the virtual IP used to reach the OpenShift cluster's API. + // +optional + APIVIP string `json:"apiVIP,omitempty"` + + // IngressVIP is the virtual IP used for cluster ingress traffic. 
+ // +optional
+ IngressVIP string `json:"ingressVIP,omitempty"`
+}
+
+// AgentClusterInstallStatus defines the observed state of the AgentClusterInstall.
+type AgentClusterInstallStatus struct {
+ // Conditions includes more detailed status for the cluster install.
+ // +optional
+ Conditions []hivev1.ClusterInstallCondition `json:"conditions,omitempty"`
+
+ // ControlPlaneAgentsDiscovered is the number of Agents currently linked to this ClusterDeployment.
+ // +optional
+ ControlPlaneAgentsDiscovered int `json:"controlPlaneAgentsDiscovered,omitempty"`
+ // ControlPlaneAgentsReady is the number of Agents currently linked to this ClusterDeployment that are ready for use.
+ // +optional
+ ControlPlaneAgentsReady int `json:"controlPlaneAgentsReady,omitempty"`
+ // WorkerAgentsDiscovered is the number of worker Agents currently linked to this ClusterDeployment.
+ // +optional
+ WorkerAgentsDiscovered int `json:"workerAgentsDiscovered,omitempty"`
+ // WorkerAgentsReady is the number of worker Agents currently linked to this ClusterDeployment that are ready for use.
+ // +optional
+ WorkerAgentsReady int `json:"workerAgentsReady,omitempty"`
+
+ ConnectivityMajorityGroups string `json:"connectivityMajorityGroups,omitempty"`
+}
+
+// Networking defines the pod network provider in the cluster.
+type Networking struct {
+ // MachineNetwork is the list of IP address pools for machines.
+ // +optional
+ MachineNetwork []MachineNetworkEntry `json:"machineNetwork,omitempty"`
+
+ // ClusterNetwork is the list of IP address pools for pods.
+ // Default is 10.128.0.0/14 and a host prefix of /23.
+ //
+ // +optional
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
+
+ // ServiceNetwork is the list of IP address pools for services.
+ // Default is 172.30.0.0/16.
+ // NOTE: currently only one entry is supported. 
+ // + // +kubebuilder:validation:MaxItems=1 + // +optional + ServiceNetwork []string `json:"serviceNetwork,omitempty"` +} + +// MachineNetworkEntry is a single IP address block for node IP blocks. +type MachineNetworkEntry struct { + // CIDR is the IP block address pool for machines within the cluster. + CIDR string `json:"cidr"` +} + +// ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks +// are allocated with size 2^HostSubnetLength. +type ClusterNetworkEntry struct { + // CIDR is the IP block address pool. + CIDR string `json:"cidr"` + + // HostPrefix is the prefix size to allocate to each node from the CIDR. + // For example, 24 would allocate 2^8=256 adresses to each node. If this + // field is not used by the plugin, it can be left unset. + // +optional + HostPrefix int32 `json:"hostPrefix,omitempty"` +} + +// ProvisionRequirements defines configuration for when the installation is ready to be launched automatically. +type ProvisionRequirements struct { + + // ControlPlaneAgents is the number of matching approved and ready Agents with the control plane role + // required to launch the install. Must be either 1 or 3. + ControlPlaneAgents int `json:"controlPlaneAgents"` + + // WorkerAgents is the minimum number of matching approved and ready Agents with the worker role + // required to launch the install. + // +kubebuilder:validation:Minimum=0 + // +optional + WorkerAgents int `json:"workerAgents,omitempty"` +} + +// HyperthreadingMode is the mode of hyperthreading for a machine. +// +kubebuilder:validation:Enum="";Enabled;Disabled +type HyperthreadingMode string + +const ( + // HyperthreadingEnabled indicates that hyperthreading is enabled. + HyperthreadingEnabled HyperthreadingMode = "Enabled" + // HyperthreadingDisabled indicates that hyperthreading is disabled. + HyperthreadingDisabled HyperthreadingMode = "Disabled" +) + +// AgentMachinePool is a pool of machines to be installed. 
+type AgentMachinePool struct { + // Hyperthreading determines the mode of hyperthreading that machines in the + // pool will utilize. + // Default is for hyperthreading to be enabled. + // + // +optional + Hyperthreading HyperthreadingMode `json:"hyperthreading,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// AgentClusterInstallList contains a list of AgentClusterInstalls +type AgentClusterInstallList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AgentClusterInstall `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AgentClusterInstall{}, &AgentClusterInstallList{}) +} diff --git a/internal/controller/api/hiveextension/v1beta1/groupversion_info.go b/internal/controller/api/hiveextension/v1beta1/groupversion_info.go new file mode 100644 index 00000000000..cf479293441 --- /dev/null +++ b/internal/controller/api/hiveextension/v1beta1/groupversion_info.go @@ -0,0 +1,41 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1beta1 contains API Schema definitions for the extensions.hive.openshift.io v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=extensions.hive.openshift.io +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +const ( + Group string = "extensions.hive.openshift.io" + Version string = "v1beta1" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: Group, Version: Version} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/internal/controller/api/hiveextension/v1beta1/zz_generated.deepcopy.go b/internal/controller/api/hiveextension/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..4b54d571304 --- /dev/null +++ b/internal/controller/api/hiveextension/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,237 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1 "github.com/openshift/hive/apis/hive/v1" + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentClusterInstall) DeepCopyInto(out *AgentClusterInstall) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentClusterInstall. +func (in *AgentClusterInstall) DeepCopy() *AgentClusterInstall { + if in == nil { + return nil + } + out := new(AgentClusterInstall) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AgentClusterInstall) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentClusterInstallList) DeepCopyInto(out *AgentClusterInstallList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AgentClusterInstall, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentClusterInstallList. +func (in *AgentClusterInstallList) DeepCopy() *AgentClusterInstallList { + if in == nil { + return nil + } + out := new(AgentClusterInstallList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AgentClusterInstallList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentClusterInstallSpec) DeepCopyInto(out *AgentClusterInstallSpec) { + *out = *in + out.ImageSetRef = in.ImageSetRef + out.ClusterDeploymentRef = in.ClusterDeploymentRef + if in.ClusterMetadata != nil { + in, out := &in.ClusterMetadata, &out.ClusterMetadata + *out = new(v1.ClusterMetadata) + **out = **in + } + if in.ManifestsConfigMapRef != nil { + in, out := &in.ManifestsConfigMapRef, &out.ManifestsConfigMapRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + in.Networking.DeepCopyInto(&out.Networking) + out.ProvisionRequirements = in.ProvisionRequirements + if in.ControlPlane != nil { + in, out := &in.ControlPlane, &out.ControlPlane + *out = new(AgentMachinePool) + **out = **in + } + if in.Compute != nil { + in, out := &in.Compute, &out.Compute + *out = make([]AgentMachinePool, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentClusterInstallSpec. +func (in *AgentClusterInstallSpec) DeepCopy() *AgentClusterInstallSpec { + if in == nil { + return nil + } + out := new(AgentClusterInstallSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentClusterInstallStatus) DeepCopyInto(out *AgentClusterInstallStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.ClusterInstallCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentClusterInstallStatus. 
+func (in *AgentClusterInstallStatus) DeepCopy() *AgentClusterInstallStatus { + if in == nil { + return nil + } + out := new(AgentClusterInstallStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentMachinePool) DeepCopyInto(out *AgentMachinePool) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentMachinePool. +func (in *AgentMachinePool) DeepCopy() *AgentMachinePool { + if in == nil { + return nil + } + out := new(AgentMachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineNetworkEntry) DeepCopyInto(out *MachineNetworkEntry) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineNetworkEntry. +func (in *MachineNetworkEntry) DeepCopy() *MachineNetworkEntry { + if in == nil { + return nil + } + out := new(MachineNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.MachineNetwork != nil { + in, out := &in.MachineNetwork, &out.MachineNetwork + *out = make([]MachineNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvisionRequirements) DeepCopyInto(out *ProvisionRequirements) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionRequirements. 
+func (in *ProvisionRequirements) DeepCopy() *ProvisionRequirements { + if in == nil { + return nil + } + out := new(ProvisionRequirements) + in.DeepCopyInto(out) + return out +} diff --git a/internal/controller/config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml b/internal/controller/config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml new file mode 100644 index 00000000000..043db9a73b6 --- /dev/null +++ b/internal/controller/config/crd/bases/extensions.hive.openshift.io_agentclusterinstalls.yaml @@ -0,0 +1,300 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + name: agentclusterinstalls.extensions.hive.openshift.io +spec: + group: extensions.hive.openshift.io + names: + kind: AgentClusterInstall + listKind: AgentClusterInstallList + plural: agentclusterinstalls + singular: agentclusterinstall + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AgentClusterInstall represents a request to provision an agent + based cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AgentClusterInstallSpec defines the desired state of the + AgentClusterInstall. 
+ properties: + apiVIP: + description: APIVIP is the virtual IP used to reach the OpenShift + cluster's API. + type: string + clusterDeploymentRef: + description: ClusterDeploymentRef is a reference to the ClusterDeployment + associated with this AgentClusterInstall. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterMetadata: + description: ClusterMetadata contains metadata information about the + installed cluster. It should be populated once the cluster install + is completed. (it can be populated sooner if desired, but Hive will + not copy back to ClusterDeployment until the Installed condition + goes True. + properties: + adminKubeconfigSecretRef: + description: AdminKubeconfigSecretRef references the secret containing + the admin kubeconfig for this cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + adminPasswordSecretRef: + description: AdminPasswordSecretRef references the secret containing + the admin username/password which can be used to login to this + cluster. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterID: + description: ClusterID is a globally unique identifier for this + cluster generated during installation. Used for reporting metrics + among other places. + type: string + infraID: + description: InfraID is an identifier for this cluster generated + during installation and used for tagging/naming resources in + cloud providers. 
+ type: string + required: + - adminKubeconfigSecretRef + - adminPasswordSecretRef + - clusterID + - infraID + type: object + compute: + description: Compute is the configuration for the machines that comprise + the compute nodes. + items: + description: AgentMachinePool is a pool of machines to be installed. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading + that machines in the pool will utilize. Default is for hyperthreading + to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + type: array + controlPlane: + description: ControlPlane is the configuration for the machines that + comprise the control plane. + properties: + hyperthreading: + description: Hyperthreading determines the mode of hyperthreading + that machines in the pool will utilize. Default is for hyperthreading + to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + imageSetRef: + description: ImageSetRef is a reference to a ClusterImageSet. The + release image specified in the ClusterImageSet will be used to install + the cluster. + properties: + name: + description: Name is the name of the ClusterImageSet that this + refers to + type: string + required: + - name + type: object + ingressVIP: + description: IngressVIP is the virtual IP used for cluster ingress + traffic. + type: string + manifestsConfigMapRef: + description: ManifestsConfigMapRef is a reference to user-provided + manifests to add to or replace manifests that are generated by the + installer. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + networking: + description: Networking is the configuration for the pod network provider + in the cluster. 
+                properties:
+                  clusterNetwork:
+                    description: ClusterNetwork is the list of IP address pools for
+                      pods. Default is 10.128.0.0/14 and a host prefix of /23.
+                    items:
+                      description: ClusterNetworkEntry is a single IP address block
+                        for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength.
+                      properties:
+                        cidr:
+                          description: CIDR is the IP block address pool.
+                          type: string
+                        hostPrefix:
+                          description: HostPrefix is the prefix size to allocate to
+                            each node from the CIDR. For example, 24 would allocate
+                            2^8=256 addresses to each node. If this field is not used
+                            by the plugin, it can be left unset.
+                          format: int32
+                          type: integer
+                      required:
+                      - cidr
+                      type: object
+                    type: array
+                  machineNetwork:
+                    description: MachineNetwork is the list of IP address pools for
+                      machines.
+                    items:
+                      description: MachineNetworkEntry is a single IP address block
+                        for node IP blocks.
+                      properties:
+                        cidr:
+                          description: CIDR is the IP block address pool for machines
+                            within the cluster.
+                          type: string
+                      required:
+                      - cidr
+                      type: object
+                    type: array
+                  serviceNetwork:
+                    description: 'ServiceNetwork is the list of IP address pools for
+                      services. Default is 172.30.0.0/16. NOTE: currently only one
+                      entry is supported.'
+                    items:
+                      type: string
+                    maxItems: 1
+                    type: array
+                type: object
+              provisionRequirements:
+                description: ProvisionRequirements defines configuration for when
+                  the installation is ready to be launched automatically.
+                properties:
+                  controlPlaneAgents:
+                    description: ControlPlaneAgents is the number of matching approved
+                      and ready Agents with the control plane role required to launch
+                      the install. Must be either 1 or 3.
+                    type: integer
+                  workerAgents:
+                    description: WorkerAgents is the minimum number of matching approved
+                      and ready Agents with the worker role required to launch the
+                      install.
+                    minimum: 0
+                    type: integer
+                required:
+                - controlPlaneAgents
+                type: object
+              sshPublicKey:
+                description: SSHPublicKey will be added to all cluster hosts for use
+                  in debugging.
+                type: string
+            required:
+            - clusterDeploymentRef
+            - imageSetRef
+            - networking
+            - provisionRequirements
+            type: object
+          status:
+            description: AgentClusterInstallStatus defines the observed state of the
+              AgentClusterInstall.
+            properties:
+              conditions:
+                description: Conditions includes more detailed status for the cluster
+                  install.
+                items:
+                  description: ClusterInstallCondition contains details for the current
+                    condition of a cluster install.
+                  properties:
+                    lastProbeTime:
+                      description: LastProbeTime is the last time we probed the condition.
+                      format: date-time
+                      type: string
+                    lastTransitionTime:
+                      description: LastTransitionTime is the last time the condition
+                        transitioned from one status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: Message is a human-readable message indicating
+                        details about last transition.
+                      type: string
+                    reason:
+                      description: Reason is a unique, one-word, CamelCase reason
+                        for the condition's last transition.
+                      type: string
+                    status:
+                      description: Status is the status of the condition.
+                      type: string
+                    type:
+                      description: Type is the type of the condition.
+                      type: string
+                  required:
+                  - status
+                  - type
+                  type: object
+                type: array
+              connectivityMajorityGroups:
+                type: string
+              controlPlaneAgentsDiscovered:
+                description: ControlPlaneAgentsDiscovered is the number of Agents
+                  currently linked to this ClusterDeployment.
+                type: integer
+              controlPlaneAgentsReady:
+                description: ControlPlaneAgentsReady is the number of Agents
+                  currently linked to this ClusterDeployment that are ready for use.
+                type: integer
+              workerAgentsDiscovered:
+                description: WorkerAgentsDiscovered is the number of worker Agents
+                  currently linked to this ClusterDeployment.
+                type: integer
+              workerAgentsReady:
+                description: WorkerAgentsReady is the number of worker Agents
+                  currently linked to this ClusterDeployment that are ready for use.
+                type: integer
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/internal/controller/config/crd/kustomization.yaml b/internal/controller/config/crd/kustomization.yaml
new file mode 100644
index 00000000000..767289996ab
--- /dev/null
+++ b/internal/controller/config/crd/kustomization.yaml
@@ -0,0 +1,29 @@
+# This kustomization.yaml is not intended to be run by itself,
+# since it depends on service name and namespace that are out of this kustomize package.
+# It should be run by config/default
+resources:
+- bases/agent-install.openshift.io_agentserviceconfigs.yaml
+- bases/agent-install.openshift.io_infraenvs.yaml
+- bases/agent-install.openshift.io_agents.yaml
+- bases/agent-install.openshift.io_nmstateconfigs.yaml
+- bases/extensions.hive.openshift.io_agentclusterinstalls.yaml
+# +kubebuilder:scaffold:crdkustomizeresource
+
+patchesStrategicMerge:
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
+# patches here are for enabling the conversion webhook for each CRD
+#- patches/webhook_in_images.yaml
+#- patches/webhook_in_clusters.yaml
+#- patches/webhook_in_hosts.yaml
+# +kubebuilder:scaffold:crdkustomizewebhookpatch
+
+# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
+# patches here are for enabling the CA injection for each CRD
+#- patches/cainjection_in_images.yaml
+#- patches/cainjection_in_clusters.yaml
+#- patches/cainjection_in_hosts.yaml
+# +kubebuilder:scaffold:crdkustomizecainjectionpatch
+
+# the following config is for teaching kustomize how to do kustomization for CRDs.
+configurations: +- kustomizeconfig.yaml diff --git a/internal/controller/controllers/agent_controller_test.go b/internal/controller/controllers/agent_controller_test.go index 4416bc29a70..c9ca9ae2f77 100644 --- a/internal/controller/controllers/agent_controller_test.go +++ b/internal/controller/controllers/agent_controller_test.go @@ -125,7 +125,7 @@ var _ = Describe("agent reconcile", func() { It("cluster not found in database", func() { host := newAgent("host", testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) Expect(c.Create(ctx, host)).To(BeNil()) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(nil, gorm.ErrRecordNotFound).Times(1) result, err := hr.Reconcile(ctx, newHostRequest(host)) @@ -147,7 +147,7 @@ var _ = Describe("agent reconcile", func() { It("error getting cluster from database", func() { host := newAgent("host", testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) Expect(c.Create(ctx, host)).To(BeNil()) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) errString := "Error getting Cluster" 
mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(nil, common.NewApiError(http.StatusInternalServerError, @@ -170,7 +170,7 @@ var _ = Describe("agent reconcile", func() { It("host not found in cluster", func() { host := newAgent("host", testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) Expect(c.Create(ctx, host)).To(BeNil()) @@ -219,7 +219,7 @@ var _ = Describe("agent reconcile", func() { host.Spec.Hostname = newHostName host.Spec.Role = models.HostRole(newRole) host.Spec.InstallationDiskID = newInstallDiskPath - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{}, nil) @@ -265,7 +265,7 @@ var _ = Describe("agent reconcile", func() { host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) host.Spec.InstallationDiskID = newInstallDiskPath - 
clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{}, nil) @@ -300,7 +300,7 @@ var _ = Describe("agent reconcile", func() { host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) host.Spec.Hostname = newHostName host.Spec.Role = models.HostRole(newRole) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{}, nil) @@ -344,7 +344,7 @@ var _ = Describe("agent reconcile", func() { host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) host.Spec.Approved = true - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, 
getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{Approved: false}, nil) @@ -379,7 +379,7 @@ var _ = Describe("agent reconcile", func() { By("Reconcile without setting ignition override, validate update ignition override didn't run") host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{Approved: false}, nil) Expect(c.Create(ctx, host)).To(BeNil()) @@ -433,7 +433,7 @@ var _ = Describe("agent reconcile", func() { mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil).AnyTimes() host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) 
By("Reconcile with ignition config, UpdateHostIgnitionInternal returns error") @@ -468,7 +468,7 @@ var _ = Describe("agent reconcile", func() { By("Reconcile without setting args, validate update installer args didn't run") host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) // mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{Approved: false}, nil) @@ -541,7 +541,7 @@ var _ = Describe("agent reconcile", func() { mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil).AnyTimes() host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: &v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) By("Reconcile with bad json in installer args, validate UpdateHostInstallerArgsInternal didn't run") @@ -608,7 +608,7 @@ var _ = Describe("agent reconcile", func() { }}} host := newAgent(hostId.String(), testNamespace, v1beta1.AgentSpec{ClusterDeploymentName: 
&v1beta1.ClusterReference{Name: "clusterDeployment", Namespace: testNamespace}}) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{}, nil) @@ -662,7 +662,7 @@ var _ = Describe("TestConditions", func() { ID: &hostId, }, }}} - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GetCommonHostInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(&common.Host{}, nil) diff --git a/internal/controller/controllers/bmh_agent_controller_test.go b/internal/controller/controllers/bmh_agent_controller_test.go index 9cb68158cd3..a19c8b955ce 100644 --- a/internal/controller/controllers/bmh_agent_controller_test.go +++ b/internal/controller/controllers/bmh_agent_controller_test.go @@ -160,10 +160,10 @@ var _ = Describe("bmac reconcile", func() { Expect(c.Create(ctx, agentCluster2)).To(Succeed()) pullSecretName := "pull-secret" - defaultClusterSpec1 := getDefaultClusterDeploymentSpec(cluster1Name, pullSecretName) + defaultClusterSpec1 := getDefaultClusterDeploymentSpec(cluster1Name, "test-cluster-aci", 
pullSecretName) clusterDeployment1 = newClusterDeployment(cluster1Name, testNamespace, defaultClusterSpec1) Expect(c.Create(ctx, clusterDeployment1)).To(Succeed()) - defaultClusterSpec2 := getDefaultClusterDeploymentSpec(cluster2Name, pullSecretName) + defaultClusterSpec2 := getDefaultClusterDeploymentSpec(cluster2Name, "test-cluster-aci", pullSecretName) clusterDeployment2 = newClusterDeployment(cluster2Name, testNamespace, defaultClusterSpec2) Expect(c.Create(ctx, clusterDeployment2)).To(Succeed()) @@ -196,7 +196,7 @@ var _ = Describe("bmac reconcile", func() { It("should return nothing if agent does not match cluster deployment name", func() { clusterName := "not-matching-agents-cluster-name" - defaultClusterSpec := getDefaultClusterDeploymentSpec(clusterName, "test-pull") + defaultClusterSpec := getDefaultClusterDeploymentSpec(clusterName, "test-cluster-aci", "test-pull") clusterDeploymentNotMatching := newClusterDeployment(clusterName, testNamespace, defaultClusterSpec) agents := bmhr.findAgentsByClusterDeployment(context.Background(), clusterDeploymentNotMatching) Expect(len(agents)).To(Equal(0)) @@ -583,7 +583,7 @@ var _ = Describe("bmac reconcile", func() { host = newBMH("bmh-reconcile", &bmh_v1alpha1.BareMetalHostSpec{Image: image, BootMACAddress: macStr, BMC: bmh_v1alpha1.BMCDetails{CredentialsName: fmt.Sprintf(adminKubeConfigStringTemplate, clusterName)}}) Expect(c.Create(ctx, host)).To(BeNil()) - defaultClusterSpec := getDefaultClusterDeploymentSpec(clusterName, pullSecretName) + defaultClusterSpec := getDefaultClusterDeploymentSpec(clusterName, "test-cluster-aci", pullSecretName) cluster = newClusterDeployment(clusterName, testNamespace, defaultClusterSpec) cluster.Spec.Installed = true Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) diff --git a/internal/controller/controllers/clusterdeployments_controller.go b/internal/controller/controllers/clusterdeployments_controller.go index 84c039f4df3..ca3fcc1cbbb 100644 --- 
a/internal/controller/controllers/clusterdeployments_controller.go +++ b/internal/controller/controllers/clusterdeployments_controller.go @@ -31,6 +31,7 @@ import ( "github.com/openshift/assisted-service/internal/bminventory" "github.com/openshift/assisted-service/internal/cluster" "github.com/openshift/assisted-service/internal/common" + hiveext "github.com/openshift/assisted-service/internal/controller/api/hiveextension/v1beta1" "github.com/openshift/assisted-service/internal/controller/api/v1beta1" "github.com/openshift/assisted-service/internal/host" "github.com/openshift/assisted-service/internal/manifests" @@ -38,7 +39,6 @@ import ( "github.com/openshift/assisted-service/restapi/operations/installer" operations "github.com/openshift/assisted-service/restapi/operations/manifests" hivev1 "github.com/openshift/hive/apis/hive/v1" - "github.com/openshift/hive/apis/hive/v1/agent" "github.com/pkg/errors" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" @@ -82,6 +82,8 @@ type ClusterDeploymentsReconciler struct { // +kubebuilder:rbac:groups=hive.openshift.io,resources=clusterdeployments/status,verbs=get;update;patch // +kubebuilder:rbac:groups=hive.openshift.io,resources=clusterdeployments/finalizers,verbs=update // +kubebuilder:rbac:groups=hive.openshift.io,resources=clusterimagesets,verbs=get;list;watch +// +kubebuilder:rbac:groups=extensions.hive.openshift.io,resources=agentclusterinstalls,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=extensions.hive.openshift.io,resources=agentclusterinstalls/status,verbs=get;update;patch func (r *ClusterDeploymentsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Log.Infof("Reconcile has been called for ClusterDeployment name=%s namespace=%s", req.Name, req.Namespace) @@ -101,39 +103,61 @@ func (r *ClusterDeploymentsReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, nil } + clusterInstall := &hiveext.AgentClusterInstall{} + if 
clusterDeployment.Spec.ClusterInstallRef == nil { + r.Log.Infof("AgentClusterInstall not set for ClusterDeployment %s", clusterDeployment.Name) + return ctrl.Result{}, nil + } + + aciName := clusterDeployment.Spec.ClusterInstallRef.Name + err = r.Get(ctx, + types.NamespacedName{ + Namespace: clusterDeployment.Namespace, + Name: aciName, + }, + clusterInstall) + if err != nil { + if k8serrors.IsNotFound(err) { + r.Log.WithField("AgentClusterInstall", aciName).Infof("AgentClusterInstall does not exist for ClusterDeployment %s", clusterDeployment.Name) + return ctrl.Result{}, nil + } + r.Log.WithError(err).Errorf("Failed to get AgentClusterInstall %s", aciName) + return ctrl.Result{Requeue: true}, err + } + cluster, err := r.Installer.GetClusterByKubeKey(req.NamespacedName) if errors.Is(err, gorm.ErrRecordNotFound) { - if !clusterDeployment.Spec.Installed { - return r.createNewCluster(ctx, req.NamespacedName, clusterDeployment) + if !isInstalled(clusterDeployment, clusterInstall) { + return r.createNewCluster(ctx, req.NamespacedName, clusterDeployment, clusterInstall) } - if !r.isSNO(clusterDeployment) { - return r.createNewDay2Cluster(ctx, req.NamespacedName, clusterDeployment) + if !r.isSNO(clusterInstall) { + return r.createNewDay2Cluster(ctx, req.NamespacedName, clusterDeployment, clusterInstall) } // cluster is installed and SNO nothing to do return ctrl.Result{Requeue: false}, nil } if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } // check for updates from user, compare spec and update if needed - err = r.updateIfNeeded(ctx, clusterDeployment, cluster) + err = r.updateIfNeeded(ctx, clusterDeployment, clusterInstall, cluster) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } // check for install config overrides and update if needed err = r.updateInstallConfigOverrides(ctx, 
clusterDeployment, cluster) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } // In case the Cluster is a Day 1 cluster and is installed, update the Metadata and create secrets for credentials if *cluster.Status == models.ClusterStatusInstalled && swag.StringValue(cluster.Kind) == models.ClusterKindCluster { if !clusterDeployment.Spec.Installed { - err = r.updateClusterMetadata(ctx, clusterDeployment, cluster) + err = r.updateClusterMetadata(ctx, clusterDeployment, cluster, clusterInstall) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } } // Delete Day1 Cluster @@ -141,38 +165,46 @@ func (r *ClusterDeploymentsReconciler) Reconcile(ctx context.Context, req ctrl.R ClusterID: *cluster.ID, }) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } - if !r.isSNO(clusterDeployment) { + if !r.isSNO(clusterInstall) { //Create Day2 cluster - return r.createNewDay2Cluster(ctx, req.NamespacedName, clusterDeployment) + return r.createNewDay2Cluster(ctx, req.NamespacedName, clusterDeployment, clusterInstall) } - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } if swag.StringValue(cluster.Kind) == models.ClusterKindCluster { // Day 1 - return r.installDay1(ctx, clusterDeployment, cluster) + return r.installDay1(ctx, clusterDeployment, clusterInstall, cluster) } else if swag.StringValue(cluster.Kind) == models.ClusterKindAddHostsCluster { // Day 2 - return r.installDay2Hosts(ctx, clusterDeployment, cluster) + return r.installDay2Hosts(ctx, clusterDeployment, clusterInstall, cluster) } - return r.updateStatus(ctx, clusterDeployment, cluster, nil) + return r.updateStatus(ctx, clusterInstall, cluster, nil) } -func (r *ClusterDeploymentsReconciler) 
installDay1(ctx context.Context, clusterDeployment *hivev1.ClusterDeployment, cluster *common.Cluster) (ctrl.Result, error) { - ready, err := r.isReadyForInstallation(ctx, clusterDeployment, cluster) +func isInstalled(clusterDeployment *hivev1.ClusterDeployment, clusterInstall *hiveext.AgentClusterInstall) bool { + if clusterDeployment.Spec.Installed { + return true + } + cond := FindStatusCondition(clusterInstall.Status.Conditions, ClusterCompletedCondition) + return cond != nil && cond.Reason == InstalledReason +} + +func (r *ClusterDeploymentsReconciler) installDay1(ctx context.Context, clusterDeployment *hivev1.ClusterDeployment, clusterInstall *hiveext.AgentClusterInstall, cluster *common.Cluster) (ctrl.Result, error) { + ready, err := r.isReadyForInstallation(ctx, clusterDeployment, clusterInstall, cluster) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } if ready { // create custom manifests if needed before installation - err = r.addCustomManifests(ctx, clusterDeployment, cluster) + err = r.addCustomManifests(ctx, clusterInstall, cluster) if err != nil { - _, _ = r.updateStatus(ctx, clusterDeployment, cluster, err) + _, _ = r.updateStatus(ctx, clusterInstall, cluster, err) // We decided to requeue with one minute timeout in order to give user a chance to fix manifest // this timeout allows us not to run reconcile too much time and // still have a nice feedback when user will fix the error @@ -185,32 +217,32 @@ func (r *ClusterDeploymentsReconciler) installDay1(ctx context.Context, clusterD ClusterID: *cluster.ID, }) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } - return r.updateStatus(ctx, clusterDeployment, ic, err) + return r.updateStatus(ctx, clusterInstall, ic, err) } - return r.updateStatus(ctx, clusterDeployment, cluster, nil) + return r.updateStatus(ctx, clusterInstall, 
cluster, nil) } -func (r *ClusterDeploymentsReconciler) installDay2Hosts(ctx context.Context, clusterDeployment *hivev1.ClusterDeployment, cluster *common.Cluster) (ctrl.Result, error) { +func (r *ClusterDeploymentsReconciler) installDay2Hosts(ctx context.Context, clusterDeployment *hivev1.ClusterDeployment, clusterInstall *hiveext.AgentClusterInstall, cluster *common.Cluster) (ctrl.Result, error) { for _, h := range cluster.Hosts { commonh, err := r.Installer.GetCommonHostInternal(ctx, cluster.ID.String(), h.ID.String()) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } if r.HostApi.IsInstallable(h) && commonh.Approved { r.Log.Infof("Installing Day2 host %s in %s %s", *h.ID, clusterDeployment.Name, clusterDeployment.Namespace) err = r.Installer.InstallSingleDay2HostInternal(ctx, *cluster.ID, *h.ID) if err != nil { - return r.updateStatus(ctx, clusterDeployment, cluster, err) + return r.updateStatus(ctx, clusterInstall, cluster, err) } } } - return r.updateStatus(ctx, clusterDeployment, cluster, nil) + return r.updateStatus(ctx, clusterInstall, cluster, nil) } -func (r *ClusterDeploymentsReconciler) updateClusterMetadata(ctx context.Context, cluster *hivev1.ClusterDeployment, c *common.Cluster) error { +func (r *ClusterDeploymentsReconciler) updateClusterMetadata(ctx context.Context, cluster *hivev1.ClusterDeployment, c *common.Cluster, clusterInstall *hiveext.AgentClusterInstall) error { s, err := r.ensureAdminPasswordSecret(ctx, cluster, c) if err != nil { @@ -220,9 +252,9 @@ func (r *ClusterDeploymentsReconciler) updateClusterMetadata(ctx context.Context if err != nil { return err } - cluster.Spec.Installed = true - cluster.Spec.ClusterMetadata = &hivev1.ClusterMetadata{ + clusterInstall.Spec.ClusterMetadata = &hivev1.ClusterMetadata{ ClusterID: c.OpenshiftClusterID.String(), + InfraID: string(*c.ID), AdminPasswordSecretRef: corev1.LocalObjectReference{ Name: s.Name, }, @@ 
-230,7 +262,7 @@ func (r *ClusterDeploymentsReconciler) updateClusterMetadata(ctx context.Context Name: k.Name, }, } - return r.Update(ctx, cluster) + return r.Update(ctx, clusterInstall) } func (r *ClusterDeploymentsReconciler) ensureAdminPasswordSecret(ctx context.Context, cluster *hivev1.ClusterDeployment, c *common.Cluster) (*corev1.Secret, error) { @@ -305,7 +337,7 @@ func (r *ClusterDeploymentsReconciler) createClusterCredentialSecret(ctx context return s, r.Create(ctx, s) } -func (r *ClusterDeploymentsReconciler) isReadyForInstallation(ctx context.Context, cluster *hivev1.ClusterDeployment, c *common.Cluster) (bool, error) { +func (r *ClusterDeploymentsReconciler) isReadyForInstallation(ctx context.Context, cluster *hivev1.ClusterDeployment, clusterInstall *hiveext.AgentClusterInstall, c *common.Cluster) (bool, error) { if ready, _ := r.ClusterApi.IsReadyForInstallation(c); !ready { return false, nil } @@ -321,29 +353,31 @@ func (r *ClusterDeploymentsReconciler) isReadyForInstallation(ctx context.Contex } } - expectedHosts := cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.ControlPlaneAgents + - cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.WorkerAgents + expectedHosts := clusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents + + clusterInstall.Spec.ProvisionRequirements.WorkerAgents return readyHosts == expectedHosts, nil } func isSupportedPlatform(cluster *hivev1.ClusterDeployment) bool { - if cluster.Spec.Platform.AgentBareMetal == nil || - cluster.Spec.Provisioning.InstallStrategy == nil || - cluster.Spec.Provisioning.InstallStrategy.Agent == nil { + if cluster.Spec.ClusterInstallRef == nil || + cluster.Spec.ClusterInstallRef.Group != hiveext.Group || + cluster.Spec.ClusterInstallRef.Kind != "AgentClusterInstall" { return false } return true } -func isUserManagedNetwork(cluster *hivev1.ClusterDeployment) bool { - if cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.ControlPlaneAgents 
== 1 && - cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.WorkerAgents == 0 { +func isUserManagedNetwork(clusterInstall *hiveext.AgentClusterInstall) bool { + if clusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents == 1 && + clusterInstall.Spec.ProvisionRequirements.WorkerAgents == 0 { return true } return false } -func (r *ClusterDeploymentsReconciler) updateIfNeeded(ctx context.Context, clusterDeployment *hivev1.ClusterDeployment, +func (r *ClusterDeploymentsReconciler) updateIfNeeded(ctx context.Context, + clusterDeployment *hivev1.ClusterDeployment, + clusterInstall *hiveext.AgentClusterInstall, cluster *common.Cluster) error { update := false @@ -363,29 +397,28 @@ func (r *ClusterDeploymentsReconciler) updateIfNeeded(ctx context.Context, clust updateString(spec.ClusterName, cluster.Name, ¶ms.Name) updateString(spec.BaseDomain, cluster.BaseDNSDomain, ¶ms.BaseDNSDomain) - if len(spec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork) > 0 { - updateString(spec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR, cluster.ClusterNetworkCidr, ¶ms.ClusterNetworkCidr) - hostPrefix := int64(spec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix) + if len(clusterInstall.Spec.Networking.ClusterNetwork) > 0 { + updateString(clusterInstall.Spec.Networking.ClusterNetwork[0].CIDR, cluster.ClusterNetworkCidr, ¶ms.ClusterNetworkCidr) + hostPrefix := int64(clusterInstall.Spec.Networking.ClusterNetwork[0].HostPrefix) if hostPrefix > 0 && hostPrefix != cluster.ClusterNetworkHostPrefix { params.ClusterNetworkHostPrefix = swag.Int64(hostPrefix) update = true } } - if len(spec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork) > 0 { - updateString(spec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0], cluster.ServiceNetworkCidr, ¶ms.ServiceNetworkCidr) + if len(clusterInstall.Spec.Networking.ServiceNetwork) > 0 { + 
updateString(clusterInstall.Spec.Networking.ServiceNetwork[0], cluster.ServiceNetworkCidr, ¶ms.ServiceNetworkCidr) } - if len(spec.Provisioning.InstallStrategy.Agent.Networking.MachineNetwork) > 0 { - updateString(spec.Provisioning.InstallStrategy.Agent.Networking.MachineNetwork[0].CIDR, cluster.MachineNetworkCidr, ¶ms.MachineNetworkCidr) + if len(clusterInstall.Spec.Networking.MachineNetwork) > 0 { + updateString(clusterInstall.Spec.Networking.MachineNetwork[0].CIDR, cluster.MachineNetworkCidr, ¶ms.MachineNetworkCidr) } - updateString(spec.Platform.AgentBareMetal.APIVIP, cluster.APIVip, ¶ms.APIVip) - updateString(spec.Platform.AgentBareMetal.IngressVIP, cluster.IngressVip, ¶ms.IngressVip) - + updateString(clusterInstall.Spec.APIVIP, cluster.APIVip, ¶ms.APIVip) + updateString(clusterInstall.Spec.IngressVIP, cluster.IngressVip, ¶ms.IngressVip) // Trim key before comapring as done in RegisterClusterInternal - sshPublicKey := strings.TrimSpace(spec.Provisioning.InstallStrategy.Agent.SSHPublicKey) + sshPublicKey := strings.TrimSpace(clusterInstall.Spec.SSHPublicKey) updateString(sshPublicKey, cluster.SSHPublicKey, ¶ms.SSHPublicKey) - if userManagedNetwork := isUserManagedNetwork(clusterDeployment); userManagedNetwork != swag.BoolValue(cluster.UserManagedNetworking) { + if userManagedNetwork := isUserManagedNetwork(clusterInstall); userManagedNetwork != swag.BoolValue(cluster.UserManagedNetworking) { params.UserManagedNetworking = swag.Bool(userManagedNetwork) } pullSecretData, err := getPullSecret(ctx, r.Client, spec.PullSecretRef.Name, clusterDeployment.Namespace) @@ -447,11 +480,11 @@ func (r *ClusterDeploymentsReconciler) updateInstallConfigOverrides(ctx context. 
} func (r *ClusterDeploymentsReconciler) syncManifests(ctx context.Context, cluster *common.Cluster, - clusterDeployment *hivev1.ClusterDeployment, alreadyCreatedManifests models.ListManifests) error { + clusterInstall *hiveext.AgentClusterInstall, alreadyCreatedManifests models.ListManifests) error { r.Log.Debugf("Going to sync list of given with already created manifests") - manifestsFromConfigMap, err := r.getClusterDeploymentManifest(ctx, clusterDeployment) + manifestsFromConfigMap, err := r.getClusterDeploymentManifest(ctx, clusterInstall) if err != nil { return err } @@ -487,53 +520,54 @@ func (r *ClusterDeploymentsReconciler) syncManifests(ctx context.Context, cluste return nil } -func (r *ClusterDeploymentsReconciler) getClusterDeploymentManifest(ctx context.Context, clusterDeployment *hivev1.ClusterDeployment) (map[string]string, error) { +func (r *ClusterDeploymentsReconciler) getClusterDeploymentManifest(ctx context.Context, clusterInstall *hiveext.AgentClusterInstall) (map[string]string, error) { configuredManifests := &corev1.ConfigMap{} configuredManifests.Data = map[string]string{} // get manifests from configmap if we have reference for it - if clusterDeployment.Spec.Provisioning.ManifestsConfigMapRef != nil { - err := r.Get(ctx, types.NamespacedName{Namespace: clusterDeployment.Namespace, - Name: clusterDeployment.Spec.Provisioning.ManifestsConfigMapRef.Name}, configuredManifests) + if clusterInstall.Spec.ManifestsConfigMapRef != nil { + err := r.Get(ctx, types.NamespacedName{Namespace: clusterInstall.Namespace, + Name: clusterInstall.Spec.ManifestsConfigMapRef.Name}, configuredManifests) if err != nil { r.Log.WithError(err).Errorf("Failed to get configmap %s in %s", - clusterDeployment.Spec.Provisioning.ManifestsConfigMapRef.Name, clusterDeployment.Namespace) + clusterInstall.Spec.ManifestsConfigMapRef.Name, clusterInstall.Namespace) return nil, err } } return configuredManifests.Data, nil } -func (r *ClusterDeploymentsReconciler) 
addCustomManifests(ctx context.Context, clusterDeployment *hivev1.ClusterDeployment, +func (r *ClusterDeploymentsReconciler) addCustomManifests(ctx context.Context, clusterInstall *hiveext.AgentClusterInstall, cluster *common.Cluster) error { alreadyCreatedManifests, err := r.Manifests.ListClusterManifestsInternal(ctx, operations.ListClusterManifestsParams{ ClusterID: *cluster.ID, }) if err != nil { - r.Log.WithError(err).Errorf("Failed to list manifests for %q cluster deployment", clusterDeployment.Name) + r.Log.WithError(err).Errorf("Failed to list manifests for %q cluster install", clusterInstall.Name) return err } // if reference to manifests was deleted from cluster deployment // but we already added some in previous reconcile loop, we want to clean them. // if no reference were provided we will delete all manifests that were in the list - if clusterDeployment.Spec.Provisioning.ManifestsConfigMapRef == nil && len(alreadyCreatedManifests) == 0 { + if clusterInstall.Spec.ManifestsConfigMapRef == nil && len(alreadyCreatedManifests) == 0 { r.Log.Debugf("Nothing to do, skipping manifest creation") return nil } - return r.syncManifests(ctx, cluster, clusterDeployment, alreadyCreatedManifests) + return r.syncManifests(ctx, cluster, clusterInstall, alreadyCreatedManifests) } -func (r *ClusterDeploymentsReconciler) isSNO(cluster *hivev1.ClusterDeployment) bool { - return cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.ControlPlaneAgents == 1 && - cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.WorkerAgents == 0 +func (r *ClusterDeploymentsReconciler) isSNO(clusterInstall *hiveext.AgentClusterInstall) bool { + return clusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents == 1 && + clusterInstall.Spec.ProvisionRequirements.WorkerAgents == 0 } func (r *ClusterDeploymentsReconciler) createNewCluster( ctx context.Context, key types.NamespacedName, - clusterDeployment *hivev1.ClusterDeployment) (ctrl.Result, error) { + 
clusterDeployment *hivev1.ClusterDeployment, + clusterInstall *hiveext.AgentClusterInstall) (ctrl.Result, error) { r.Log.Infof("Creating a new clusterDeployment %s %s", clusterDeployment.Name, clusterDeployment.Namespace) spec := clusterDeployment.Spec @@ -541,13 +575,13 @@ func (r *ClusterDeploymentsReconciler) createNewCluster( pullSecret, err := getPullSecret(ctx, r.Client, spec.PullSecretRef.Name, key.Namespace) if err != nil { r.Log.WithError(err).Error("failed to get pull secret") - return r.updateStatus(ctx, clusterDeployment, nil, err) + return r.updateStatus(ctx, clusterInstall, nil, err) } - openshiftVersion, err := r.addOpenshiftVersion(ctx, spec, pullSecret) + openshiftVersion, err := r.addOpenshiftVersion(ctx, clusterInstall.Spec, pullSecret) if err != nil { r.Log.WithError(err).Error("failed to add OCP version") - return r.updateStatus(ctx, clusterDeployment, nil, err) + return r.updateStatus(ctx, clusterInstall, nil, err) } clusterParams := &models.ClusterCreateParams{ @@ -557,22 +591,22 @@ func (r *ClusterDeploymentsReconciler) createNewCluster( OlmOperators: nil, // TODO: handle operators PullSecret: swag.String(pullSecret), VipDhcpAllocation: swag.Bool(false), - IngressVip: spec.Platform.AgentBareMetal.IngressVIP, - SSHPublicKey: spec.Provisioning.InstallStrategy.Agent.SSHPublicKey, - UserManagedNetworking: swag.Bool(isUserManagedNetwork(clusterDeployment)), + IngressVip: clusterInstall.Spec.IngressVIP, + SSHPublicKey: clusterInstall.Spec.SSHPublicKey, + UserManagedNetworking: swag.Bool(isUserManagedNetwork(clusterInstall)), } - if len(spec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork) > 0 { - clusterParams.ClusterNetworkCidr = swag.String(spec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR) - clusterParams.ClusterNetworkHostPrefix = int64(spec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix) + if len(clusterInstall.Spec.Networking.ClusterNetwork) > 0 { + 
clusterParams.ClusterNetworkCidr = swag.String(clusterInstall.Spec.Networking.ClusterNetwork[0].CIDR) + clusterParams.ClusterNetworkHostPrefix = int64(clusterInstall.Spec.Networking.ClusterNetwork[0].HostPrefix) } - if len(spec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork) > 0 { - clusterParams.ServiceNetworkCidr = swag.String(spec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0]) + if len(clusterInstall.Spec.Networking.ServiceNetwork) > 0 { + clusterParams.ServiceNetworkCidr = swag.String(clusterInstall.Spec.Networking.ServiceNetwork[0]) } - if spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.ControlPlaneAgents == 1 && - spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.WorkerAgents == 0 { + if clusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents == 1 && + clusterInstall.Spec.ProvisionRequirements.WorkerAgents == 0 { clusterParams.HighAvailabilityMode = swag.String(HighAvailabilityModeNone) } @@ -580,13 +614,14 @@ func (r *ClusterDeploymentsReconciler) createNewCluster( NewClusterParams: clusterParams, }) - return r.updateStatus(ctx, clusterDeployment, c, err) + return r.updateStatus(ctx, clusterInstall, c, err) } func (r *ClusterDeploymentsReconciler) createNewDay2Cluster( ctx context.Context, key types.NamespacedName, - clusterDeployment *hivev1.ClusterDeployment) (ctrl.Result, error) { + clusterDeployment *hivev1.ClusterDeployment, + clusterInstall *hiveext.AgentClusterInstall) (ctrl.Result, error) { r.Log.Infof("Creating a new Day2 Cluster %s %s", clusterDeployment.Name, clusterDeployment.Namespace) spec := clusterDeployment.Spec @@ -596,13 +631,13 @@ func (r *ClusterDeploymentsReconciler) createNewDay2Cluster( pullSecret, err := getPullSecret(ctx, r.Client, spec.PullSecretRef.Name, key.Namespace) if err != nil { r.Log.WithError(err).Error("failed to get pull secret") - return r.updateStatus(ctx, clusterDeployment, nil, err) + return r.updateStatus(ctx, clusterInstall, nil, err) } - 
openshiftVersion, err := r.addOpenshiftVersion(ctx, spec, pullSecret) + openshiftVersion, err := r.addOpenshiftVersion(ctx, clusterInstall.Spec, pullSecret) if err != nil { r.Log.WithError(err).Error("failed to add OCP version") - return r.updateStatus(ctx, clusterDeployment, nil, err) + return r.updateStatus(ctx, clusterInstall, nil, err) } clusterParams := &models.AddHostsClusterCreateParams{ @@ -616,26 +651,20 @@ func (r *ClusterDeploymentsReconciler) createNewDay2Cluster( NewAddHostsClusterParams: clusterParams, }) - return r.updateStatus(ctx, clusterDeployment, c, err) + return r.updateStatus(ctx, clusterInstall, c, err) } -func (r *ClusterDeploymentsReconciler) getReleaseImage(ctx context.Context, spec hivev1.ClusterDeploymentSpec) (string, error) { - var releaseImage string - if spec.Provisioning.ReleaseImage != "" { - releaseImage = spec.Provisioning.ReleaseImage - } else { - var err error - releaseImage, err = getReleaseImage(ctx, r.Client, spec.Provisioning.ImageSetRef.Name) - if err != nil { - return "", err - } +func (r *ClusterDeploymentsReconciler) getReleaseImage(ctx context.Context, spec hiveext.AgentClusterInstallSpec) (string, error) { + releaseImage, err := getReleaseImage(ctx, r.Client, spec.ImageSetRef.Name) + if err != nil { + return "", err } return releaseImage, nil } func (r *ClusterDeploymentsReconciler) addOpenshiftVersion( ctx context.Context, - spec hivev1.ClusterDeploymentSpec, + spec hiveext.AgentClusterInstallSpec, pullSecret string) (*models.OpenshiftVersion, error) { releaseImage, err := r.getReleaseImage(ctx, spec) @@ -690,6 +719,7 @@ func (r *ClusterDeploymentsReconciler) SetupWithManager(mgr ctrl.Manager) error mapSecretToClusterDeployment := func(a client.Object) []reconcile.Request { clusterDeployments := &hivev1.ClusterDeploymentList{} if err := r.List(context.Background(), clusterDeployments); err != nil { + // TODO: silently ignoring error here return []reconcile.Request{} } reply := make([]reconcile.Request, 0, 
len(clusterDeployments.Items)) @@ -704,40 +734,50 @@ func (r *ClusterDeploymentsReconciler) SetupWithManager(mgr ctrl.Manager) error return reply } + // Reconcile the ClusterDeployment referenced by this AgentClusterInstall. + mapClusterInstallToClusterDeployment := func(a client.Object) []reconcile.Request { + aci, ok := a.(*hiveext.AgentClusterInstall) + if !ok { + r.Log.Errorf("%v was not an AgentClusterInstall", a) // shouldn't be possible + return []reconcile.Request{} + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: aci.Namespace, + Name: aci.Spec.ClusterDeploymentRef.Name, + }, + }, + } + } + clusterDeploymentUpdates := r.CRDEventsHandler.GetClusterDeploymentUpdates() return ctrl.NewControllerManagedBy(mgr). For(&hivev1.ClusterDeployment{}). Watches(&source.Kind{Type: &corev1.Secret{}}, handler.EnqueueRequestsFromMapFunc(mapSecretToClusterDeployment)). + Watches(&source.Kind{Type: &hiveext.AgentClusterInstall{}}, handler.EnqueueRequestsFromMapFunc(mapClusterInstallToClusterDeployment)). Watches(&source.Channel{Source: clusterDeploymentUpdates}, &handler.EnqueueRequestForObject{}). Complete(r) } -// updateStatus is updating all the ClusterDeployment Conditions. +// updateStatus is updating all the AgentClusterInstall Conditions. // In case that an error has occured when trying to sync the Spec, the error (syncErr) is presented in SpecSyncedCondition. 
// Internal bool differentiate between backend server error (internal HTTP 5XX) and user input error (HTTP 4XXX) -func (r *ClusterDeploymentsReconciler) updateStatus(ctx context.Context, cluster *hivev1.ClusterDeployment, c *common.Cluster, syncErr error) (ctrl.Result, error) { - clusterSpecSynced(cluster, syncErr) +func (r *ClusterDeploymentsReconciler) updateStatus(ctx context.Context, clusterInstall *hiveext.AgentClusterInstall, c *common.Cluster, syncErr error) (ctrl.Result, error) { + clusterSpecSynced(clusterInstall, syncErr) if c != nil { - cluster.Status.InstallStrategy = &hivev1.InstallStrategyStatus{Agent: &agent.InstallStrategyStatus{ - ConnectivityMajorityGroups: c.ConnectivityMajorityGroups, - }} - cluster.Status.InstallStrategy.Agent.ConnectivityMajorityGroups = c.ConnectivityMajorityGroups + clusterInstall.Status.ConnectivityMajorityGroups = c.ConnectivityMajorityGroups if c.Status != nil { status := *c.Status - clusterReadyForInstallation(cluster, status) - clusterValidated(cluster, status, c) - clusterInstalled(cluster, status, swag.StringValue(c.StatusInfo)) + clusterRequirementsMet(clusterInstall, status) + clusterValidated(clusterInstall, status, c) + clusterCompleted(clusterInstall, status, swag.StringValue(c.StatusInfo)) } } else { - setClusterConditionsUnknown(cluster) - } - - if cluster.Spec.Installed { - cluster.Status.APIURL = fmt.Sprintf("https://api.%s.%s:6443", cluster.Spec.ClusterName, cluster.Spec.BaseDomain) - cluster.Status.WebConsoleURL = common.GetConsoleUrl(cluster.Spec.ClusterName, cluster.Spec.BaseDomain) + setClusterConditionsUnknown(clusterInstall) } - if updateErr := r.Status().Update(ctx, cluster); updateErr != nil { + if updateErr := r.Status().Update(ctx, clusterInstall); updateErr != nil { r.Log.WithError(updateErr).Error("failed to update ClusterDeployment Status") return ctrl.Result{Requeue: true}, nil } @@ -748,7 +788,7 @@ func (r *ClusterDeploymentsReconciler) updateStatus(ctx context.Context, cluster } // 
clusterSpecSynced is updating the Cluster SpecSynced Condition. -func clusterSpecSynced(cluster *hivev1.ClusterDeployment, syncErr error) { +func clusterSpecSynced(cluster *hiveext.AgentClusterInstall, syncErr error) { var condStatus corev1.ConditionStatus var reason string var msg string @@ -766,7 +806,7 @@ func clusterSpecSynced(cluster *hivev1.ClusterDeployment, syncErr error) { msg = InputErrorMsg + " " + syncErr.Error() } } - setClusterCondition(&cluster.Status.Conditions, hivev1.ClusterDeploymentCondition{ + setClusterCondition(&cluster.Status.Conditions, hivev1.ClusterInstallCondition{ Type: ClusterSpecSyncedCondition, Status: condStatus, Reason: reason, @@ -774,7 +814,7 @@ func clusterSpecSynced(cluster *hivev1.ClusterDeployment, syncErr error) { }) } -func clusterReadyForInstallation(cluster *hivev1.ClusterDeployment, status string) { +func clusterRequirementsMet(clusterInstall *hiveext.AgentClusterInstall, status string) { var condStatus corev1.ConditionStatus var reason string var msg string @@ -790,7 +830,7 @@ func clusterReadyForInstallation(cluster *hivev1.ClusterDeployment, status strin case models.ClusterStatusPreparingForInstallation, models.ClusterStatusInstalled, models.ClusterStatusInstalling, models.ClusterStatusInstallingPendingUserAction, models.ClusterStatusError, models.ClusterStatusAddingHosts, models.ClusterStatusFinalizing: - condStatus = corev1.ConditionFalse + condStatus = corev1.ConditionTrue reason = ClusterAlreadyInstallingReason msg = ClusterAlreadyInstallingMsg default: @@ -798,15 +838,15 @@ func clusterReadyForInstallation(cluster *hivev1.ClusterDeployment, status strin reason = UnknownStatusReason msg = fmt.Sprintf("%s %s", UnknownStatusMsg, status) } - setClusterCondition(&cluster.Status.Conditions, hivev1.ClusterDeploymentCondition{ - Type: ClusterReadyForInstallationCondition, + setClusterCondition(&clusterInstall.Status.Conditions, hivev1.ClusterInstallCondition{ + Type: ClusterRequirementsMetCondition, Status: condStatus, 
Reason: reason, Message: msg, }) } -func clusterInstalled(cluster *hivev1.ClusterDeployment, status, statusInfo string) { +func clusterCompleted(clusterInstall *hiveext.AgentClusterInstall, status, statusInfo string) { var condStatus corev1.ConditionStatus var reason string var msg string @@ -833,15 +873,15 @@ func clusterInstalled(cluster *hivev1.ClusterDeployment, status, statusInfo stri reason = UnknownStatusReason msg = fmt.Sprintf("%s %s", UnknownStatusMsg, status) } - setClusterCondition(&cluster.Status.Conditions, hivev1.ClusterDeploymentCondition{ - Type: ClusterInstalledCondition, + setClusterCondition(&clusterInstall.Status.Conditions, hivev1.ClusterInstallCondition{ + Type: ClusterCompletedCondition, Status: condStatus, Reason: reason, Message: msg, }) } -func clusterValidated(clusterDeployment *hivev1.ClusterDeployment, status string, c *common.Cluster) { +func clusterValidated(clusterInstall *hiveext.AgentClusterInstall, status string, c *common.Cluster) { failedValidationInfo := "" validationRes, err := cluster.GetValidations(c) var failures []string @@ -880,7 +920,7 @@ func clusterValidated(clusterDeployment *hivev1.ClusterDeployment, status string reason = ValidationsPassingReason msg = ClusterValidationsOKMsg } - setClusterCondition(&clusterDeployment.Status.Conditions, hivev1.ClusterDeploymentCondition{ + setClusterCondition(&clusterInstall.Status.Conditions, hivev1.ClusterInstallCondition{ Type: ClusterValidatedCondition, Status: condStatus, Reason: reason, @@ -888,21 +928,21 @@ func clusterValidated(clusterDeployment *hivev1.ClusterDeployment, status string }) } -func setClusterConditionsUnknown(clusterDeployment *hivev1.ClusterDeployment) { - setClusterCondition(&clusterDeployment.Status.Conditions, hivev1.ClusterDeploymentCondition{ +func setClusterConditionsUnknown(clusterInstall *hiveext.AgentClusterInstall) { + setClusterCondition(&clusterInstall.Status.Conditions, hivev1.ClusterInstallCondition{ Type: ClusterValidatedCondition, Status: 
corev1.ConditionUnknown, Reason: NotAvailableReason, Message: NotAvailableMsg, }) - setClusterCondition(&clusterDeployment.Status.Conditions, hivev1.ClusterDeploymentCondition{ - Type: ClusterReadyForInstallationCondition, + setClusterCondition(&clusterInstall.Status.Conditions, hivev1.ClusterInstallCondition{ + Type: ClusterRequirementsMetCondition, Status: corev1.ConditionUnknown, Reason: NotAvailableReason, Message: NotAvailableMsg, }) - setClusterCondition(&clusterDeployment.Status.Conditions, hivev1.ClusterDeploymentCondition{ - Type: ClusterInstalledCondition, + setClusterCondition(&clusterInstall.Status.Conditions, hivev1.ClusterInstallCondition{ + Type: ClusterCompletedCondition, Status: corev1.ConditionUnknown, Reason: NotAvailableReason, Message: NotAvailableMsg, @@ -910,9 +950,9 @@ func setClusterConditionsUnknown(clusterDeployment *hivev1.ClusterDeployment) { } // SetStatusCondition sets the corresponding condition in conditions to newCondition. -func setClusterCondition(conditions *[]hivev1.ClusterDeploymentCondition, newCondition hivev1.ClusterDeploymentCondition) { +func setClusterCondition(conditions *[]hivev1.ClusterInstallCondition, newCondition hivev1.ClusterInstallCondition) { if conditions == nil { - conditions = &[]hivev1.ClusterDeploymentCondition{} + conditions = &[]hivev1.ClusterInstallCondition{} } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -929,10 +969,9 @@ func setClusterCondition(conditions *[]hivev1.ClusterDeploymentCondition, newCon existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) existingCondition.LastProbeTime = metav1.NewTime(time.Now()) } - } -func isConditionEqual(existingCond hivev1.ClusterDeploymentCondition, newCondition hivev1.ClusterDeploymentCondition) bool { +func isConditionEqual(existingCond hivev1.ClusterInstallCondition, newCondition hivev1.ClusterInstallCondition) bool { if existingCond.Type == newCondition.Type { return 
existingCond.Status == newCondition.Status && existingCond.Reason == newCondition.Reason && @@ -942,7 +981,7 @@ func isConditionEqual(existingCond hivev1.ClusterDeploymentCondition, newConditi } // FindStatusCondition finds the conditionType in conditions. -func FindStatusCondition(conditions []hivev1.ClusterDeploymentCondition, conditionType hivev1.ClusterDeploymentConditionType) *hivev1.ClusterDeploymentCondition { +func FindStatusCondition(conditions []hivev1.ClusterInstallCondition, conditionType string) *hivev1.ClusterInstallCondition { for i := range conditions { if conditions[i].Type == conditionType { return &conditions[i] diff --git a/internal/controller/controllers/clusterdeployments_controller_test.go b/internal/controller/controllers/clusterdeployments_controller_test.go index d97251f0dec..ee812675841 100644 --- a/internal/controller/controllers/clusterdeployments_controller_test.go +++ b/internal/controller/controllers/clusterdeployments_controller_test.go @@ -17,12 +17,12 @@ import ( "github.com/openshift/assisted-service/internal/bminventory" "github.com/openshift/assisted-service/internal/cluster" "github.com/openshift/assisted-service/internal/common" + hiveext "github.com/openshift/assisted-service/internal/controller/api/hiveextension/v1beta1" "github.com/openshift/assisted-service/internal/host" "github.com/openshift/assisted-service/internal/manifests" "github.com/openshift/assisted-service/models" "github.com/openshift/assisted-service/restapi/operations/installer" hivev1 "github.com/openshift/hive/apis/hive/v1" - "github.com/openshift/hive/apis/hive/v1/agent" "github.com/openshift/hive/apis/hive/v1/aws" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -57,95 +57,98 @@ func newClusterDeployment(name, namespace string, spec hivev1.ClusterDeploymentS } } -func getDefaultSNOClusterDeploymentSpec(clusterName, pullSecretName string) hivev1.ClusterDeploymentSpec { - return hivev1.ClusterDeploymentSpec{ - ClusterName: clusterName, - BaseDomain: 
"hive.example.com", - Provisioning: &hivev1.Provisioning{ - InstallConfigSecretRef: &corev1.LocalObjectReference{Name: "cluster-install-config"}, - ImageSetRef: &hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, - InstallStrategy: &hivev1.InstallStrategy{ - Agent: &agent.InstallStrategy{ - Networking: agent.Networking{ - MachineNetwork: nil, - ClusterNetwork: []agent.ClusterNetworkEntry{{ - CIDR: "10.128.0.0/14", - HostPrefix: 23, - }}, - ServiceNetwork: []string{"172.30.0.0/16"}, - }, - SSHPublicKey: "some-key", - ProvisionRequirements: agent.ProvisionRequirements{ - ControlPlaneAgents: 1, - WorkerAgents: 0, - }, - }, - }, +func getDefaultSNOAgentClusterInstallSpec(clusterName string) hiveext.AgentClusterInstallSpec { + return hiveext.AgentClusterInstallSpec{ + Networking: hiveext.Networking{ + MachineNetwork: nil, + ClusterNetwork: []hiveext.ClusterNetworkEntry{{ + CIDR: "10.128.0.0/14", + HostPrefix: 23, + }}, + ServiceNetwork: []string{"172.30.0.0/16"}, }, - Platform: hivev1.Platform{ - AgentBareMetal: &agent.BareMetalPlatform{}, + SSHPublicKey: "some-key", + ProvisionRequirements: hiveext.ProvisionRequirements{ + ControlPlaneAgents: 1, + WorkerAgents: 0, }, - PullSecretRef: &corev1.LocalObjectReference{ - Name: pullSecretName, + ImageSetRef: hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, + ClusterDeploymentRef: corev1.LocalObjectReference{Name: clusterName}, + } +} + +func newAgentClusterInstall(name, namespace string, spec hiveext.AgentClusterInstallSpec) *hiveext.AgentClusterInstall { + return &hiveext.AgentClusterInstall{ + Spec: spec, + TypeMeta: metav1.TypeMeta{ + Kind: "AgentClusterInstall", + APIVersion: "hiveextension/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, }, } } -func getDefaultClusterDeploymentSpec(clusterName, pullSecretName string) hivev1.ClusterDeploymentSpec { +func getDefaultAgentClusterInstallSpec(clusterName string) hiveext.AgentClusterInstallSpec { + return 
hiveext.AgentClusterInstallSpec{ + APIVIP: "1.2.3.8", + IngressVIP: "1.2.3.9", + Networking: hiveext.Networking{ + MachineNetwork: nil, + ClusterNetwork: []hiveext.ClusterNetworkEntry{{ + CIDR: "10.128.0.0/14", + HostPrefix: 23, + }}, + ServiceNetwork: []string{"172.30.0.0/16"}, + }, + SSHPublicKey: "some-key", + ProvisionRequirements: hiveext.ProvisionRequirements{ + ControlPlaneAgents: 3, + WorkerAgents: 2, + }, + ImageSetRef: hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, + ClusterDeploymentRef: corev1.LocalObjectReference{Name: clusterName}, + } +} + +func getDefaultClusterDeploymentSpec(clusterName, aciName, pullSecretName string) hivev1.ClusterDeploymentSpec { return hivev1.ClusterDeploymentSpec{ BaseDomain: "hive.example.com", ClusterName: clusterName, - Provisioning: &hivev1.Provisioning{ - InstallConfigSecretRef: &corev1.LocalObjectReference{Name: "cluster-install-config"}, - ImageSetRef: &hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, - InstallStrategy: &hivev1.InstallStrategy{ - Agent: &agent.InstallStrategy{ - Networking: agent.Networking{ - MachineNetwork: nil, - ClusterNetwork: []agent.ClusterNetworkEntry{{ - CIDR: "10.128.0.0/14", - HostPrefix: 23, - }}, - ServiceNetwork: []string{"172.30.0.0/16"}, - }, - SSHPublicKey: "some-key", - ProvisionRequirements: agent.ProvisionRequirements{ - ControlPlaneAgents: 3, - WorkerAgents: 2, - }, - }, - }, - }, - Platform: hivev1.Platform{ - AgentBareMetal: &agent.BareMetalPlatform{ - APIVIP: "1.2.3.8", - IngressVIP: "1.2.3.9", - }, - }, PullSecretRef: &corev1.LocalObjectReference{ Name: pullSecretName, }, + ClusterInstallRef: &hivev1.ClusterInstallLocalReference{ + Group: hiveext.Group, + Version: hiveext.Version, + Kind: "AgentClusterInstall", + Name: aciName, + }, } } var _ = Describe("cluster reconcile", func() { var ( - c client.Client - cr *ClusterDeploymentsReconciler - ctx = context.Background() - mockCtrl *gomock.Controller - mockInstallerInternal *bminventory.MockInstallerInternals 
- mockClusterApi *cluster.MockAPI - mockHostApi *host.MockAPI - mockManifestsApi *manifests.MockClusterManifestsInternals - mockCRDEventsHandler *MockCRDEventsHandler - defaultClusterSpec hivev1.ClusterDeploymentSpec - clusterName = "test-cluster" - pullSecretName = "pull-secret" - imageSetName = "openshift-v4.8.0" - releaseImage = "quay.io/openshift-release-dev/ocp-release:4.8.0-x86_64" - ocpReleaseVersion = "4.8.0" - openshiftVersion = &models.OpenshiftVersion{ + c client.Client + cr *ClusterDeploymentsReconciler + ctx = context.Background() + mockCtrl *gomock.Controller + mockInstallerInternal *bminventory.MockInstallerInternals + mockClusterApi *cluster.MockAPI + mockHostApi *host.MockAPI + mockManifestsApi *manifests.MockClusterManifestsInternals + mockCRDEventsHandler *MockCRDEventsHandler + defaultClusterSpec hivev1.ClusterDeploymentSpec + clusterName = "test-cluster" + agentClusterInstallName = "test-cluster-aci" + defaultAgentClusterInstallSpec hiveext.AgentClusterInstallSpec + pullSecretName = "pull-secret" + imageSetName = "openshift-v4.8.0" + releaseImage = "quay.io/openshift-release-dev/ocp-release:4.8.0-x86_64" + ocpReleaseVersion = "4.8.0" + openshiftVersion = &models.OpenshiftVersion{ DisplayName: new(string), ReleaseImage: new(string), ReleaseVersion: &ocpReleaseVersion, @@ -165,6 +168,17 @@ var _ = Describe("cluster reconcile", func() { return &cluster } + getTestClusterInstall := func() *hiveext.AgentClusterInstall { + clusterInstall := &hiveext.AgentClusterInstall{} + Expect(c.Get(ctx, + types.NamespacedName{ + Namespace: testNamespace, + Name: agentClusterInstallName, + }, + clusterInstall)).To(BeNil()) + return clusterInstall + } + getSecret := func(namespace, name string) *corev1.Secret { var secret corev1.Secret key := types.NamespacedName{ @@ -176,7 +190,8 @@ var _ = Describe("cluster reconcile", func() { } BeforeEach(func() { - defaultClusterSpec = getDefaultClusterDeploymentSpec(clusterName, pullSecretName) + defaultClusterSpec = 
getDefaultClusterDeploymentSpec(clusterName, agentClusterInstallName, pullSecretName) + defaultAgentClusterInstallSpec = getDefaultAgentClusterInstallSpec(clusterName) c = fakeclient.NewClientBuilder().WithScheme(scheme.Scheme).Build() mockCtrl = gomock.NewController(GinkgoT()) mockInstallerInternal = bminventory.NewMockInstallerInternals(mockCtrl) @@ -229,11 +244,11 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterNotReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterNotReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionFalse)) + aci := getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterNotReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterNotReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionFalse)) } It("create new cluster", func() { @@ -246,7 +261,8 @@ var _ = Describe("cluster reconcile", func() { cluster := newClusterDeployment(clusterName, testNamespace, defaultClusterSpec) Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) - + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, defaultAgentClusterInstallSpec) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) validateCreation(cluster) }) @@ -258,9 
+274,12 @@ var _ = Describe("cluster reconcile", func() { mockInstallerInternal.EXPECT().AddOpenshiftVersion(gomock.Any(), gomock.Any(), gomock.Any()).Return(openshiftVersion, nil) cluster := newClusterDeployment(clusterName, testNamespace, - getDefaultSNOClusterDeploymentSpec(clusterName, pullSecretName)) + getDefaultClusterDeploymentSpec(clusterName, agentClusterInstallName, pullSecretName)) Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, getDefaultSNOAgentClusterInstallSpec(clusterName)) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) + validateCreation(cluster) }) @@ -273,10 +292,13 @@ var _ = Describe("cluster reconcile", func() { mockInstallerInternal.EXPECT().AddOpenshiftVersion(gomock.Any(), gomock.Any(), gomock.Any()).Return(openshiftVersion, nil) cluster := newClusterDeployment(clusterName, testNamespace, defaultClusterSpec) - cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.WorkerAgents = 0 - cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.ControlPlaneAgents = 1 Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, defaultAgentClusterInstallSpec) + aci.Spec.ProvisionRequirements.WorkerAgents = 0 + aci.Spec.ProvisionRequirements.ControlPlaneAgents = 1 + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) + validateCreation(cluster) }) }) @@ -290,16 +312,19 @@ var _ = Describe("cluster reconcile", func() { cluster := newClusterDeployment(clusterName, testNamespace, defaultClusterSpec) Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, getDefaultSNOAgentClusterInstallSpec(clusterName)) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) + request := newClusterDeploymentRequest(cluster) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) 
Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, errString) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) }) }) @@ -332,6 +357,9 @@ var _ = Describe("cluster reconcile", func() { cluster.Status = hivev1.ClusterDeploymentStatus{} Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, defaultAgentClusterInstallSpec) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) + expectedErr := "expected-error" mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(nil, errors.Errorf(expectedErr)) @@ -339,11 +367,11 @@ var _ = Describe("cluster reconcile", func() { result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedErr) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - 
Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) }) Context("cluster deletion", func() { @@ -420,6 +448,8 @@ var _ = Describe("cluster reconcile", func() { cluster = newClusterDeployment(clusterName, testNamespace, defaultClusterSpec) Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, defaultAgentClusterInstallSpec) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) request = newClusterDeploymentRequest(cluster) result, err = cr.Reconcile(ctx, request) @@ -432,6 +462,7 @@ var _ = Describe("cluster reconcile", func() { var ( sId strfmt.UUID cluster *hivev1.ClusterDeployment + aci *hiveext.AgentClusterInstall backEndCluster *common.Cluster ) @@ -444,19 +475,21 @@ var _ = Describe("cluster reconcile", func() { id := uuid.New() sId = strfmt.UUID(id.String()) Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + aci = newAgentClusterInstall(agentClusterInstallName, testNamespace, defaultAgentClusterInstallSpec) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) backEndCluster = &common.Cluster{ Cluster: models.Cluster{ ID: &sId, Name: clusterName, OpenshiftVersion: "4.8", - ClusterNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR, - ClusterNetworkHostPrefix: int64(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix), + ClusterNetworkCidr: defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].CIDR, + ClusterNetworkHostPrefix: 
int64(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].HostPrefix), Status: swag.String(models.ClusterStatusReady), - ServiceNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0], - IngressVip: defaultClusterSpec.Platform.AgentBareMetal.IngressVIP, - APIVip: defaultClusterSpec.Platform.AgentBareMetal.APIVIP, + ServiceNetworkCidr: defaultAgentClusterInstallSpec.Networking.ServiceNetwork[0], + IngressVip: defaultAgentClusterInstallSpec.IngressVIP, + APIVip: defaultAgentClusterInstallSpec.APIVIP, BaseDNSDomain: defaultClusterSpec.BaseDomain, - SSHPublicKey: defaultClusterSpec.Provisioning.InstallStrategy.Agent.SSHPublicKey, + SSHPublicKey: defaultAgentClusterInstallSpec.SSHPublicKey, Kind: swag.String(models.ClusterKindCluster), }, PullSecret: testPullSecretVal, @@ -496,10 +529,10 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstallationInProgressReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionFalse)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstallationInProgressReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionFalse)) }) It("installed", func() { @@ -532,13 +565,13 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) + 
aci = getTestClusterInstall() cluster = getTestCluster() - Expect(cluster.Spec.Installed).To(BeTrue()) - Expect(cluster.Spec.ClusterMetadata.ClusterID).To(Equal(openshiftID.String())) - secretAdmin := getSecret(cluster.Namespace, cluster.Spec.ClusterMetadata.AdminPasswordSecretRef.Name) + Expect(aci.Spec.ClusterMetadata.ClusterID).To(Equal(openshiftID.String())) + secretAdmin := getSecret(cluster.Namespace, aci.Spec.ClusterMetadata.AdminPasswordSecretRef.Name) Expect(string(secretAdmin.Data["password"])).To(Equal(password)) Expect(string(secretAdmin.Data["username"])).To(Equal(username)) - secretKubeConfig := getSecret(cluster.Namespace, cluster.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name) + secretKubeConfig := getSecret(cluster.Namespace, aci.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name) Expect(string(secretKubeConfig.Data["kubeconfig"])).To(Equal(kubeconfig)) }) @@ -559,29 +592,28 @@ var _ = Describe("cluster reconcile", func() { mockInstallerInternal.EXPECT().GetCredentialsInternal(gomock.Any(), gomock.Any()).Return(cred, nil).Times(1) mockInstallerInternal.EXPECT().DownloadClusterKubeconfigInternal(gomock.Any(), gomock.Any()).Return(ioutil.NopCloser(strings.NewReader(kubeconfig)), int64(len(kubeconfig)), nil).Times(1) mockInstallerInternal.EXPECT().DeregisterClusterInternal(gomock.Any(), gomock.Any()).Return(nil).Times(1) - cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.WorkerAgents = 0 - cluster.Spec.Provisioning.InstallStrategy.Agent.ProvisionRequirements.ControlPlaneAgents = 1 + aci.Spec.ProvisionRequirements.WorkerAgents = 0 + aci.Spec.ProvisionRequirements.ControlPlaneAgents = 1 cluster.Spec.BaseDomain = "hive.example.com" Expect(c.Update(ctx, cluster)).Should(BeNil()) + Expect(c.Update(ctx, aci)).Should(BeNil()) request := newClusterDeploymentRequest(cluster) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) + aci = getTestClusterInstall() + 
Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstalledReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Message).To(Equal(InstalledMsg + " Done")) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionTrue)) + cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstalledReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Message).To(Equal(InstalledMsg + " Done")) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionTrue)) - - Expect(cluster.Spec.Installed).To(BeTrue()) - Expect(cluster.Spec.ClusterMetadata.ClusterID).To(Equal(openshiftID.String())) - Expect(cluster.Status.WebConsoleURL).To(Equal("https://console-openshift-console.apps.test-cluster.hive.example.com")) - Expect(cluster.Status.APIURL).To(Equal("https://api.test-cluster.hive.example.com:6443")) - secretAdmin := getSecret(cluster.Namespace, cluster.Spec.ClusterMetadata.AdminPasswordSecretRef.Name) + Expect(aci.Spec.ClusterMetadata.ClusterID).To(Equal(openshiftID.String())) + secretAdmin := getSecret(cluster.Namespace, aci.Spec.ClusterMetadata.AdminPasswordSecretRef.Name) Expect(string(secretAdmin.Data["password"])).To(Equal(password)) Expect(string(secretAdmin.Data["username"])).To(Equal(username)) - secretKubeConfig := getSecret(cluster.Namespace, cluster.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name) + secretKubeConfig := getSecret(cluster.Namespace, aci.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name) 
Expect(string(secretKubeConfig.Data["kubeconfig"])).To(Equal(kubeconfig)) }) @@ -608,13 +640,13 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedError) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstalledReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionTrue)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstalledReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("Fail to create day2", func() { @@ -642,13 +674,13 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedErr) - Expect(FindStatusCondition(cluster.Status.Conditions, 
ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(NotAvailableReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionUnknown)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(NotAvailableReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionUnknown)) }) It("Create day2 if day1 is already deleted none SNO", func() { @@ -662,15 +694,20 @@ var _ = Describe("cluster reconcile", func() { } mockInstallerInternal.EXPECT().RegisterAddHostsClusterInternal(gomock.Any(), gomock.Any(), gomock.Any()).Return(clusterReply, nil) mockInstallerInternal.EXPECT().AddOpenshiftVersion(gomock.Any(), gomock.Any(), gomock.Any()).Return(openshiftVersion, nil) - cluster.Spec.Installed = true - Expect(c.Update(ctx, cluster)).Should(BeNil()) + setClusterCondition(&aci.Status.Conditions, hivev1.ClusterInstallCondition{ + Type: ClusterCompletedCondition, + Status: corev1.ConditionTrue, + Reason: InstalledReason, + Message: InstalledMsg, + }) + Expect(c.Status().Update(ctx, aci)).Should(BeNil()) request := newClusterDeploymentRequest(cluster) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) 
Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) }) It("installed - fail to get kube config", func() { @@ -693,15 +730,14 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedErr) - Expect(cluster.Spec.Installed).To(BeFalse()) Expect(cluster.Spec.ClusterMetadata).To(BeNil()) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstalledReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionTrue)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstalledReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("installed - 
fail to get admin password", func() { @@ -717,15 +753,14 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() - Expect(cluster.Spec.Installed).To(BeFalse()) + aci = getTestClusterInstall() Expect(cluster.Spec.ClusterMetadata).To(BeNil()) expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedErr) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstalledReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionTrue)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstalledReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("failed to start installation", func() { @@ -744,14 +779,14 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedErr) - 
Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionTrue)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("not ready for installation", func() { @@ -764,11 +799,11 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterNotReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, 
ClusterReadyForInstallationCondition).Message).To(Equal(ClusterNotReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionFalse)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterNotReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterNotReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionFalse)) }) It("not ready for installation - hosts not approved", func() { @@ -784,11 +819,11 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterNotReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterNotReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionFalse)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterNotReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterNotReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, 
ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionFalse)) }) It("install day2 host", func() { @@ -813,11 +848,11 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterAlreadyInstallingReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterAlreadyInstallingMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionFalse)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterAlreadyInstallingReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterAlreadyInstallingMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("install failure day2 host", func() { @@ -843,19 +878,19 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedErr) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, 
ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterAlreadyInstallingReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterAlreadyInstallingMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterAlreadyInstallingReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterAlreadyInstallingMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("Install with manifests - no configmap", func() { - cluster.Spec.Provisioning.ManifestsConfigMapRef = &corev1.LocalObjectReference{Name: "cluster-install-config"} - Expect(c.Update(ctx, cluster)).Should(BeNil()) + aci.Spec.ManifestsConfigMapRef = &corev1.LocalObjectReference{Name: "cluster-install-config"} + Expect(c.Update(ctx, aci)).Should(BeNil()) backEndCluster.Status = swag.String(models.ClusterStatusReady) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) @@ -869,13 +904,13 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{Requeue: true, RequeueAfter: 1 * time.Minute})) - cluster = getTestCluster() - 
Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).NotTo(Equal("")) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionTrue)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).NotTo(Equal("")) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("Update manifests - manifests exists , create failed", func() { @@ -901,21 +936,21 @@ var _ = Describe("cluster reconcile", func() { mockManifestsApi.EXPECT().ListClusterManifestsInternal(gomock.Any(), gomock.Any()).Return(models.ListManifests{}, nil).Times(1) mockManifestsApi.EXPECT().CreateClusterManifestInternal(gomock.Any(), gomock.Any()).Return(nil, errors.Errorf("error")).Times(1) request := newClusterDeploymentRequest(cluster) - cluster = getTestCluster() - 
cluster.Spec.Provisioning.ManifestsConfigMapRef = ref - Expect(c.Update(ctx, cluster)).Should(BeNil()) + aci = getTestClusterInstall() + aci.Spec.ManifestsConfigMapRef = ref + Expect(c.Update(ctx, aci)).Should(BeNil()) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{Requeue: true, RequeueAfter: 1 * time.Minute})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, "error") - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionTrue)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("Update 
manifests - manifests exists , list failed", func() { @@ -928,20 +963,20 @@ var _ = Describe("cluster reconcile", func() { request := newClusterDeploymentRequest(cluster) cluster = getTestCluster() - cluster.Spec.Provisioning.ManifestsConfigMapRef = ref - Expect(c.Update(ctx, cluster)).Should(BeNil()) + aci.Spec.ManifestsConfigMapRef = ref + Expect(c.Update(ctx, aci)).Should(BeNil()) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{Requeue: true, RequeueAfter: 1 * time.Minute})) - cluster = getTestCluster() + aci = getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, "error") - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionTrue)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, 
ClusterRequirementsMetCondition).Message).To(Equal(ClusterReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionTrue)) }) It("Update manifests - succeed", func() { @@ -978,17 +1013,17 @@ var _ = Describe("cluster reconcile", func() { Return(installClusterReply, nil) cluster = getTestCluster() - cluster.Spec.Provisioning.ManifestsConfigMapRef = ref - Expect(c.Update(ctx, cluster)).Should(BeNil()) + aci.Spec.ManifestsConfigMapRef = ref + Expect(c.Update(ctx, aci)).Should(BeNil()) request := newClusterDeploymentRequest(cluster) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstallationInProgressReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionFalse)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstallationInProgressReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionFalse)) }) It("Update manifests - no manifests", func() { @@ -1015,10 +1050,10 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstallationInProgressReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, 
ClusterInstalledCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionFalse)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstallationInProgressReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionFalse)) }) It("Update manifests - delete old + error should be ignored", func() { @@ -1045,10 +1080,10 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Reason).To(Equal(InstallationInProgressReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterInstalledCondition).Status).To(Equal(corev1.ConditionFalse)) + aci = getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Reason).To(Equal(InstallationInProgressReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Message).To(Equal(InstallationInProgressMsg + " Waiting for control plane")) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterCompletedCondition).Status).To(Equal(corev1.ConditionFalse)) }) }) @@ -1056,9 +1091,12 @@ var _ = Describe("cluster reconcile", func() { It("reconcile on installed sno cluster should not return an error or requeue", func() { mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(nil, 
gorm.ErrRecordNotFound).Times(1) cluster := newClusterDeployment(clusterName, testNamespace, - getDefaultSNOClusterDeploymentSpec(clusterName, pullSecretName)) - cluster.Spec.Installed = true + getDefaultClusterDeploymentSpec(clusterName, agentClusterInstallName, pullSecretName)) Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, getDefaultSNOAgentClusterInstallSpec(clusterName)) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) + request := newClusterDeploymentRequest(cluster) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) @@ -1069,6 +1107,7 @@ var _ = Describe("cluster reconcile", func() { var ( sId strfmt.UUID cluster *hivev1.ClusterDeployment + aci *hiveext.AgentClusterInstall ) BeforeEach(func() { @@ -1080,6 +1119,9 @@ var _ = Describe("cluster reconcile", func() { sId = strfmt.UUID(id.String()) Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + + aci = newAgentClusterInstall(agentClusterInstallName, testNamespace, defaultAgentClusterInstallSpec) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) }) It("update pull-secret network cidr and cluster name", func() { @@ -1089,7 +1131,7 @@ var _ = Describe("cluster reconcile", func() { Name: "different-cluster-name", OpenshiftVersion: "4.8", ClusterNetworkCidr: "11.129.0.0/14", - ClusterNetworkHostPrefix: int64(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix), + ClusterNetworkHostPrefix: int64(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].HostPrefix), Status: swag.String(models.ClusterStatusPendingForInput), }, @@ -1109,7 +1151,7 @@ var _ = Describe("cluster reconcile", func() { Expect(swag.StringValue(param.ClusterUpdateParams.PullSecret)).To(Equal(testPullSecretVal)) Expect(swag.StringValue(param.ClusterUpdateParams.Name)).To(Equal(defaultClusterSpec.ClusterName)) Expect(swag.StringValue(param.ClusterUpdateParams.ClusterNetworkCidr)). 
- To(Equal(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR)) + To(Equal(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].CIDR)) }).Return(updateReply, nil) request := newClusterDeploymentRequest(cluster) @@ -1117,11 +1159,11 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterNotReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterNotReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionFalse)) + aci := getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(SyncedOkReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterNotReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterNotReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionFalse)) }) It("only state changed", func() { @@ -1130,14 +1172,14 @@ var _ = Describe("cluster reconcile", func() { ID: &sId, Name: clusterName, OpenshiftVersion: "4.8", - ClusterNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR, - ClusterNetworkHostPrefix: int64(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix), + ClusterNetworkCidr: defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].CIDR, + ClusterNetworkHostPrefix: 
int64(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].HostPrefix), Status: swag.String(models.ClusterStatusInsufficient), - ServiceNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0], - IngressVip: defaultClusterSpec.Platform.AgentBareMetal.IngressVIP, - APIVip: defaultClusterSpec.Platform.AgentBareMetal.APIVIP, + ServiceNetworkCidr: defaultAgentClusterInstallSpec.Networking.ServiceNetwork[0], + IngressVip: defaultAgentClusterInstallSpec.IngressVIP, + APIVip: defaultAgentClusterInstallSpec.APIVIP, BaseDNSDomain: defaultClusterSpec.BaseDomain, - SSHPublicKey: defaultClusterSpec.Provisioning.InstallStrategy.Agent.SSHPublicKey, + SSHPublicKey: defaultAgentClusterInstallSpec.SSHPublicKey, Kind: swag.String(models.ClusterKindCluster), ValidationsInfo: "{\"some-check\":[{\"id\":\"checking1\",\"status\":\"failure\",\"message\":\"Check1 is not OK\"},{\"id\":\"checking2\",\"status\":\"success\",\"message\":\"Check2 is OK\"},{\"id\":\"checking3\",\"status\":\"failure\",\"message\":\"Check3 is not OK\"}]}", }, @@ -1151,13 +1193,13 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{})) - cluster = getTestCluster() - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Reason).To(Equal(ClusterNotReadyReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Message).To(Equal(ClusterNotReadyMsg)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterReadyForInstallationCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterValidatedCondition).Reason).To(Equal(ValidationsFailingReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterValidatedCondition).Message).To(Equal(ClusterValidationsFailingMsg + " Check1 is not OK,Check3 is not OK")) - Expect(FindStatusCondition(cluster.Status.Conditions, 
ClusterValidatedCondition).Status).To(Equal(corev1.ConditionFalse)) + aci := getTestClusterInstall() + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Reason).To(Equal(ClusterNotReadyReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Message).To(Equal(ClusterNotReadyMsg)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterRequirementsMetCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterValidatedCondition).Reason).To(Equal(ValidationsFailingReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterValidatedCondition).Message).To(Equal(ClusterValidationsFailingMsg + " Check1 is not OK,Check3 is not OK")) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterValidatedCondition).Status).To(Equal(corev1.ConditionFalse)) }) It("failed getting cluster", func() { @@ -1169,11 +1211,11 @@ var _ = Describe("cluster reconcile", func() { result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci := getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, expectedErr) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) }) 
It("update internal error", func() { @@ -1197,11 +1239,11 @@ var _ = Describe("cluster reconcile", func() { Expect(err).To(BeNil()) Expect(result).To(Equal(ctrl.Result{RequeueAfter: defaultRequeueAfterOnError})) - cluster = getTestCluster() + aci := getTestClusterInstall() expectedState := fmt.Sprintf("%s %s", BackendErrorMsg, errString) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) - Expect(FindStatusCondition(cluster.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Reason).To(Equal(BackendErrorReason)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Status).To(Equal(corev1.ConditionFalse)) + Expect(FindStatusCondition(aci.Status.Conditions, ClusterSpecSyncedCondition).Message).To(Equal(expectedState)) }) It("add install config overrides annotation", func() { @@ -1210,14 +1252,14 @@ var _ = Describe("cluster reconcile", func() { ID: &sId, Name: clusterName, OpenshiftVersion: "4.8", - ClusterNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR, - ClusterNetworkHostPrefix: int64(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix), + ClusterNetworkCidr: defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].CIDR, + ClusterNetworkHostPrefix: int64(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].HostPrefix), Status: swag.String(models.ClusterStatusInsufficient), - ServiceNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0], - IngressVip: defaultClusterSpec.Platform.AgentBareMetal.IngressVIP, - APIVip: defaultClusterSpec.Platform.AgentBareMetal.APIVIP, + ServiceNetworkCidr: 
defaultAgentClusterInstallSpec.Networking.ServiceNetwork[0], + IngressVip: defaultAgentClusterInstallSpec.IngressVIP, + APIVip: defaultAgentClusterInstallSpec.APIVIP, BaseDNSDomain: defaultClusterSpec.BaseDomain, - SSHPublicKey: defaultClusterSpec.Provisioning.InstallStrategy.Agent.SSHPublicKey, + SSHPublicKey: defaultAgentClusterInstallSpec.SSHPublicKey, }, PullSecret: testPullSecretVal, } @@ -1251,14 +1293,14 @@ var _ = Describe("cluster reconcile", func() { ID: &sId, Name: clusterName, OpenshiftVersion: "4.8", - ClusterNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR, - ClusterNetworkHostPrefix: int64(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix), + ClusterNetworkCidr: defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].CIDR, + ClusterNetworkHostPrefix: int64(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].HostPrefix), Status: swag.String(models.ClusterStatusInsufficient), - ServiceNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0], - IngressVip: defaultClusterSpec.Platform.AgentBareMetal.IngressVIP, - APIVip: defaultClusterSpec.Platform.AgentBareMetal.APIVIP, + ServiceNetworkCidr: defaultAgentClusterInstallSpec.Networking.ServiceNetwork[0], + IngressVip: defaultAgentClusterInstallSpec.IngressVIP, + APIVip: defaultAgentClusterInstallSpec.APIVIP, BaseDNSDomain: defaultClusterSpec.BaseDomain, - SSHPublicKey: defaultClusterSpec.Provisioning.InstallStrategy.Agent.SSHPublicKey, + SSHPublicKey: defaultAgentClusterInstallSpec.SSHPublicKey, InstallConfigOverrides: `{"controlPlane": {"hyperthreading": "Disabled"}}`, }, PullSecret: testPullSecretVal, @@ -1289,14 +1331,14 @@ var _ = Describe("cluster reconcile", func() { ID: &sId, Name: clusterName, OpenshiftVersion: "4.8", - ClusterNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR, - ClusterNetworkHostPrefix: 
int64(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix), + ClusterNetworkCidr: defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].CIDR, + ClusterNetworkHostPrefix: int64(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].HostPrefix), Status: swag.String(models.ClusterStatusInsufficient), - ServiceNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0], - IngressVip: defaultClusterSpec.Platform.AgentBareMetal.IngressVIP, - APIVip: defaultClusterSpec.Platform.AgentBareMetal.APIVIP, + ServiceNetworkCidr: defaultAgentClusterInstallSpec.Networking.ServiceNetwork[0], + IngressVip: defaultAgentClusterInstallSpec.IngressVIP, + APIVip: defaultAgentClusterInstallSpec.APIVIP, BaseDNSDomain: defaultClusterSpec.BaseDomain, - SSHPublicKey: defaultClusterSpec.Provisioning.InstallStrategy.Agent.SSHPublicKey, + SSHPublicKey: defaultAgentClusterInstallSpec.SSHPublicKey, InstallConfigOverrides: `{"controlPlane": {"hyperthreading": "Disabled"}}`, }, PullSecret: testPullSecretVal, @@ -1343,14 +1385,14 @@ var _ = Describe("cluster reconcile", func() { ID: &sId, Name: clusterName, OpenshiftVersion: "4.8", - ClusterNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].CIDR, - ClusterNetworkHostPrefix: int64(defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ClusterNetwork[0].HostPrefix), + ClusterNetworkCidr: defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].CIDR, + ClusterNetworkHostPrefix: int64(defaultAgentClusterInstallSpec.Networking.ClusterNetwork[0].HostPrefix), Status: swag.String(models.ClusterStatusInsufficient), - ServiceNetworkCidr: defaultClusterSpec.Provisioning.InstallStrategy.Agent.Networking.ServiceNetwork[0], - IngressVip: defaultClusterSpec.Platform.AgentBareMetal.IngressVIP, - APIVip: defaultClusterSpec.Platform.AgentBareMetal.APIVIP, + ServiceNetworkCidr: 
defaultAgentClusterInstallSpec.Networking.ServiceNetwork[0], + IngressVip: defaultAgentClusterInstallSpec.IngressVIP, + APIVip: defaultAgentClusterInstallSpec.APIVIP, BaseDNSDomain: defaultClusterSpec.BaseDomain, - SSHPublicKey: defaultClusterSpec.Provisioning.InstallStrategy.Agent.SSHPublicKey, + SSHPublicKey: defaultAgentClusterInstallSpec.SSHPublicKey, }, PullSecret: testPullSecretVal, } @@ -1360,11 +1402,13 @@ var _ = Describe("cluster reconcile", func() { Expect(c.Create(ctx, pullSecret)).To(BeNil()) cluster := newClusterDeployment(clusterName, testNamespace, defaultClusterSpec) - sshPublicKeySuffixSpace := fmt.Sprintf("%s ", - defaultClusterSpec.Provisioning.InstallStrategy.Agent.SSHPublicKey) - cluster.Spec.Provisioning.InstallStrategy.Agent.SSHPublicKey = sshPublicKeySuffixSpace Expect(c.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + aci := newAgentClusterInstall(agentClusterInstallName, testNamespace, defaultAgentClusterInstallSpec) + sshPublicKeySuffixSpace := fmt.Sprintf("%s ", defaultAgentClusterInstallSpec.SSHPublicKey) + aci.Spec.SSHPublicKey = sshPublicKeySuffixSpace + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) + request := newClusterDeploymentRequest(cluster) result, err := cr.Reconcile(ctx, request) Expect(err).To(BeNil()) @@ -1375,13 +1419,14 @@ var _ = Describe("cluster reconcile", func() { var _ = Describe("TestConditions", func() { var ( - c client.Client - cr *ClusterDeploymentsReconciler - ctx = context.Background() - mockCtrl *gomock.Controller - backEndCluster *common.Cluster - clusterRequest ctrl.Request - clusterKey types.NamespacedName + c client.Client + cr *ClusterDeploymentsReconciler + ctx = context.Background() + mockCtrl *gomock.Controller + backEndCluster *common.Cluster + clusterRequest ctrl.Request + clusterKey types.NamespacedName + agentClusterInstallKey types.NamespacedName ) BeforeEach(func() { @@ -1401,8 +1446,14 @@ var _ = Describe("TestConditions", func() { Namespace: testNamespace, Name: "clusterDeployment", 
} - clusterDeployment := newClusterDeployment(clusterKey.Name, clusterKey.Namespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + agentClusterInstallKey = types.NamespacedName{ + Namespace: testNamespace, + Name: "agentClusterInstall", + } + clusterDeployment := newClusterDeployment(clusterKey.Name, clusterKey.Namespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", agentClusterInstallKey.Name, "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) + aci := newAgentClusterInstall(agentClusterInstallKey.Name, agentClusterInstallKey.Namespace, getDefaultAgentClusterInstallSpec(clusterKey.Name)) + Expect(c.Create(ctx, aci)).ShouldNot(HaveOccurred()) clusterRequest = newClusterDeploymentRequest(clusterDeployment) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) }) @@ -1416,22 +1467,22 @@ var _ = Describe("TestConditions", func() { clusterStatus string statusInfo string validationInfo string - conditions []hivev1.ClusterDeploymentCondition + conditions []hivev1.ClusterInstallCondition }{ { name: "Unsufficient", clusterStatus: models.ClusterStatusInsufficient, statusInfo: "", validationInfo: "{\"some-check\":[{\"id\":\"checking1\",\"status\":\"failure\",\"message\":\"Check1 is not OK\"},{\"id\":\"checking2\",\"status\":\"success\",\"message\":\"Check2 is OK\"},{\"id\":\"checking3\",\"status\":\"failure\",\"message\":\"Check3 is not OK\"}]}", - conditions: []hivev1.ClusterDeploymentCondition{ + conditions: []hivev1.ClusterInstallCondition{ { - Type: ClusterReadyForInstallationCondition, + Type: ClusterRequirementsMetCondition, Message: ClusterNotReadyMsg, Reason: ClusterNotReadyReason, Status: corev1.ConditionFalse, }, { - Type: ClusterInstalledCondition, + Type: ClusterCompletedCondition, Message: InstallationNotStartedMsg, Reason: InstallationNotStartedReason, Status: corev1.ConditionFalse, @@ -1449,15 +1500,15 @@ var _ = Describe("TestConditions", func() { 
clusterStatus: models.ClusterStatusPendingForInput, statusInfo: "", validationInfo: "{\"some-check\":[{\"id\":\"checking1\",\"status\":\"failure\",\"message\":\"Check1 is not OK\"},{\"id\":\"checking2\",\"status\":\"success\",\"message\":\"Check2 is OK\"},{\"id\":\"checking3\",\"status\":\"failure\",\"message\":\"Check3 is not OK\"}]}", - conditions: []hivev1.ClusterDeploymentCondition{ + conditions: []hivev1.ClusterInstallCondition{ { - Type: ClusterReadyForInstallationCondition, + Type: ClusterRequirementsMetCondition, Message: ClusterNotReadyMsg, Reason: ClusterNotReadyReason, Status: corev1.ConditionFalse, }, { - Type: ClusterInstalledCondition, + Type: ClusterCompletedCondition, Message: InstallationNotStartedMsg, Reason: InstallationNotStartedReason, Status: corev1.ConditionFalse, @@ -1475,15 +1526,15 @@ var _ = Describe("TestConditions", func() { clusterStatus: models.ClusterStatusAddingHosts, statusInfo: "Done", validationInfo: "", - conditions: []hivev1.ClusterDeploymentCondition{ + conditions: []hivev1.ClusterInstallCondition{ { - Type: ClusterReadyForInstallationCondition, + Type: ClusterRequirementsMetCondition, Message: ClusterAlreadyInstallingMsg, Reason: ClusterAlreadyInstallingReason, - Status: corev1.ConditionFalse, + Status: corev1.ConditionTrue, }, { - Type: ClusterInstalledCondition, + Type: ClusterCompletedCondition, Message: InstalledMsg + " Done", Reason: InstalledReason, Status: corev1.ConditionTrue, @@ -1501,15 +1552,15 @@ var _ = Describe("TestConditions", func() { clusterStatus: models.ClusterStatusInstalled, statusInfo: "Done", validationInfo: "{\"some-check\":[{\"id\":\"checking2\",\"status\":\"success\",\"message\":\"Check2 is OK\"}]}", - conditions: []hivev1.ClusterDeploymentCondition{ + conditions: []hivev1.ClusterInstallCondition{ { - Type: ClusterReadyForInstallationCondition, + Type: ClusterRequirementsMetCondition, Message: ClusterAlreadyInstallingMsg, Reason: ClusterAlreadyInstallingReason, - Status: corev1.ConditionFalse, + 
Status: corev1.ConditionTrue, }, { - Type: ClusterInstalledCondition, + Type: ClusterCompletedCondition, Message: InstalledMsg + " Done", Reason: InstalledReason, Status: corev1.ConditionTrue, @@ -1527,15 +1578,15 @@ var _ = Describe("TestConditions", func() { clusterStatus: models.ClusterStatusInstalling, statusInfo: "Phase 1", validationInfo: "{\"some-check\":[{\"id\":\"checking2\",\"status\":\"success\",\"message\":\"Check2 is OK\"}]}", - conditions: []hivev1.ClusterDeploymentCondition{ + conditions: []hivev1.ClusterInstallCondition{ { - Type: ClusterReadyForInstallationCondition, + Type: ClusterRequirementsMetCondition, Message: ClusterAlreadyInstallingMsg, Reason: ClusterAlreadyInstallingReason, - Status: corev1.ConditionFalse, + Status: corev1.ConditionTrue, }, { - Type: ClusterInstalledCondition, + Type: ClusterCompletedCondition, Message: InstallationInProgressMsg + " Phase 1", Reason: InstallationInProgressReason, Status: corev1.ConditionFalse, @@ -1553,15 +1604,15 @@ var _ = Describe("TestConditions", func() { clusterStatus: models.ClusterStatusReady, statusInfo: "", validationInfo: "{\"some-check\":[{\"id\":\"checking2\",\"status\":\"success\",\"message\":\"Check2 is OK\"}]}", - conditions: []hivev1.ClusterDeploymentCondition{ + conditions: []hivev1.ClusterInstallCondition{ { - Type: ClusterReadyForInstallationCondition, + Type: ClusterRequirementsMetCondition, Message: ClusterReadyMsg, Reason: ClusterReadyReason, Status: corev1.ConditionTrue, }, { - Type: ClusterInstalledCondition, + Type: ClusterCompletedCondition, Message: InstallationNotStartedMsg, Reason: InstallationNotStartedReason, Status: corev1.ConditionFalse, @@ -1586,10 +1637,12 @@ var _ = Describe("TestConditions", func() { Expect(err).To(BeNil()) cluster := &hivev1.ClusterDeployment{} Expect(c.Get(ctx, clusterKey, cluster)).To(BeNil()) + clusterInstall := &hiveext.AgentClusterInstall{} + Expect(c.Get(ctx, agentClusterInstallKey, clusterInstall)).To(BeNil()) for _, cond := range t.conditions 
{ - Expect(FindStatusCondition(cluster.Status.Conditions, cond.Type).Message).To(Equal(cond.Message)) - Expect(FindStatusCondition(cluster.Status.Conditions, cond.Type).Reason).To(Equal(cond.Reason)) - Expect(FindStatusCondition(cluster.Status.Conditions, cond.Type).Status).To(Equal(cond.Status)) + Expect(FindStatusCondition(clusterInstall.Status.Conditions, cond.Type).Message).To(Equal(cond.Message)) + Expect(FindStatusCondition(clusterInstall.Status.Conditions, cond.Type).Reason).To(Equal(cond.Reason)) + Expect(FindStatusCondition(clusterInstall.Status.Conditions, cond.Type).Status).To(Equal(cond.Status)) } }) diff --git a/internal/controller/controllers/conditions.go b/internal/controller/controllers/conditions.go index 441f914cf33..2b39cb795b3 100644 --- a/internal/controller/controllers/conditions.go +++ b/internal/controller/controllers/conditions.go @@ -49,23 +49,23 @@ const ( UnknownStatusReason string = "UnknownStatus" UnknownStatusMsg string = "The installation status is currently not recognized:" - //ClusterDeployment Conditions - ClusterSpecSyncedCondition hivev1.ClusterDeploymentConditionType = "SpecSynced" - - ClusterInstalledCondition hivev1.ClusterDeploymentConditionType = "Installed" - - ClusterReadyForInstallationCondition hivev1.ClusterDeploymentConditionType = "ReadyForInstallation" - ClusterReadyReason string = "ClusterIsReady" - ClusterReadyMsg string = "The cluster is ready to begin the installation" - ClusterNotReadyReason string = "ClusterNotReady" - ClusterNotReadyMsg string = "The cluster is not ready to begin the installation" - ClusterAlreadyInstallingReason string = "ClusterAlreadyInstalling" - ClusterAlreadyInstallingMsg string = "The cluster cannot begin the installation because it has already started" - - ClusterValidatedCondition hivev1.ClusterDeploymentConditionType = "Validated" - ClusterValidationsOKMsg string = "The cluster's validations are passing" - ClusterValidationsUnknownMsg string = "The cluster's validations have not 
yet been calculated" - ClusterValidationsFailingMsg string = "The cluster's validations are failing:" + // ClusterInstall Conditions + ClusterSpecSyncedCondition string = "SpecSynced" + + ClusterCompletedCondition string = hivev1.ClusterInstallCompleted + + ClusterRequirementsMetCondition string = hivev1.ClusterInstallRequirementsMet + ClusterReadyReason string = "ClusterIsReady" + ClusterReadyMsg string = "The cluster is ready to begin the installation" + ClusterNotReadyReason string = "ClusterNotReady" + ClusterNotReadyMsg string = "The cluster is not ready to begin the installation" + ClusterAlreadyInstallingReason string = "ClusterAlreadyInstalling" + ClusterAlreadyInstallingMsg string = "The cluster requirements are met" + + ClusterValidatedCondition string = "Validated" + ClusterValidationsOKMsg string = "The cluster's validations are passing" + ClusterValidationsUnknownMsg string = "The cluster's validations have not yet been calculated" + ClusterValidationsFailingMsg string = "The cluster's validations are failing:" //Agent Conditions SpecSyncedCondition conditionsv1.ConditionType = "SpecSynced" diff --git a/internal/controller/controllers/controllers_suite_test.go b/internal/controller/controllers/controllers_suite_test.go index 593d9cf8e40..0190d30565b 100644 --- a/internal/controller/controllers/controllers_suite_test.go +++ b/internal/controller/controllers/controllers_suite_test.go @@ -7,6 +7,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" routev1 "github.com/openshift/api/route/v1" + hiveext "github.com/openshift/assisted-service/internal/controller/api/hiveextension/v1beta1" "github.com/openshift/assisted-service/internal/controller/api/v1beta1" hivev1 "github.com/openshift/hive/apis/hive/v1" corev1 "k8s.io/api/core/v1" @@ -19,6 +20,7 @@ import ( func init() { _ = v1beta1.AddToScheme(scheme.Scheme) _ = hivev1.AddToScheme(scheme.Scheme) + _ = hiveext.AddToScheme(scheme.Scheme) _ = bmh_v1alpha1.AddToScheme(scheme.Scheme) _ = routev1.AddToScheme(scheme.Scheme) } diff --git a/internal/controller/controllers/infraenv_controller_test.go b/internal/controller/controllers/infraenv_controller_test.go index 09f74080bcc..1282bb1ddd2 100644 --- a/internal/controller/controllers/infraenv_controller_test.go +++ b/internal/controller/controllers/infraenv_controller_test.go @@ -114,7 +114,7 @@ var _ = Describe("infraEnv reconcile", func() { imageInfo := models.ImageInfo{ DownloadURL: "downloadurl", } - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GenerateClusterISOInternal(gomock.Any(), gomock.Any()). 
@@ -147,7 +147,7 @@ var _ = Describe("infraEnv reconcile", func() { imageInfo := models.ImageInfo{ DownloadURL: "downloadurl", } - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().GenerateClusterISOInternal(gomock.Any(), gomock.Any()). @@ -177,7 +177,7 @@ var _ = Describe("infraEnv reconcile", func() { }) It("create new infraEnv image - backend failure", func() { - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) expectedError := common.NewApiError(http.StatusInternalServerError, errors.New("server error")) @@ -209,7 +209,7 @@ var _ = Describe("infraEnv reconcile", func() { }) It("create new infraEnv image - cluster not retrieved from database", func() { - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) expectedError := common.NewApiError(http.StatusInternalServerError, errors.New("server error")) @@ -235,7 +235,7 @@ var _ = Describe("infraEnv reconcile", func() { }) 
It("create new infraEnv image - cluster not found in database", func() { - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(nil, gorm.ErrRecordNotFound) infraEnvImage := newInfraEnvImage("infraEnvImage", testNamespace, aiv1beta1.InfraEnvSpec{ @@ -259,7 +259,7 @@ var _ = Describe("infraEnv reconcile", func() { }) It("create new infraEnv image - while image is being created", func() { - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) expectedError := common.NewApiError(http.StatusConflict, errors.New("Another request to generate an image has been recently submitted. 
Please wait a few seconds and try again.")) @@ -290,7 +290,7 @@ var _ = Describe("infraEnv reconcile", func() { }) It("create new image - client failure", func() { - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) expectedError := common.NewApiError(http.StatusBadRequest, errors.New("client error")) @@ -348,7 +348,7 @@ var _ = Describe("infraEnv reconcile", func() { It("create image with proxy configuration and ntp sources", func() { imageInfo := models.ImageInfo{DownloadURL: "downloadurl"} - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) infraEnvImage := newInfraEnvImage("infraEnvImage", testNamespace, aiv1beta1.InfraEnvSpec{ @@ -376,7 +376,7 @@ var _ = Describe("infraEnv reconcile", func() { It("create image with ignition config override", func() { imageInfo := models.ImageInfo{DownloadURL: "downloadurl"} ignitionConfigOverride := `{"ignition": {"version": "3.1.0"}, "storage": {"files": [{"path": "/tmp/example", "contents": {"source": "data:text/plain;base64,aGVscGltdHJhcHBlZGluYXN3YWdnZXJzcGVj"}}]}}` - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", 
"pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) infraEnvImage := newInfraEnvImage("infraEnvImage", testNamespace, aiv1beta1.InfraEnvSpec{ @@ -400,7 +400,7 @@ var _ = Describe("infraEnv reconcile", func() { It("create image with an invalid ignition config override", func() { ignitionConfigOverride := `bad ignition config` - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) infraEnvImage := newInfraEnvImage("infraEnvImage", testNamespace, aiv1beta1.InfraEnvSpec{ @@ -419,7 +419,7 @@ var _ = Describe("infraEnv reconcile", func() { }) It("failed to update cluster with proxy", func() { - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) infraEnvImage := newInfraEnvImage("infraEnvImage", testNamespace, aiv1beta1.InfraEnvSpec{ @@ -475,7 +475,7 @@ var _ = Describe("infraEnv reconcile", func() { NetConfig: aiv1beta1.NetConfig{Raw: []byte(hostStaticNetworkConfig.NetworkYaml)}, }) Expect(c.Create(ctx, nmstateConfig)).To(BeNil()) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) 
mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().AddOpenshiftVersion(gomock.Any(), gomock.Any(), gomock.Any()).Return(openshiftVersion, nil) @@ -516,7 +516,7 @@ var _ = Describe("infraEnv reconcile", func() { NetConfig: aiv1beta1.NetConfig{Raw: []byte(hostStaticNetworkConfig.NetworkYaml)}, }) Expect(c.Create(ctx, nmstateConfig)).To(BeNil()) - clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "pull-secret")) + clusterDeployment := newClusterDeployment("clusterDeployment", testNamespace, getDefaultClusterDeploymentSpec("clusterDeployment-test", "test-cluster-aci", "pull-secret")) Expect(c.Create(ctx, clusterDeployment)).To(BeNil()) mockInstallerInternal.EXPECT().GetClusterByKubeKey(gomock.Any()).Return(backEndCluster, nil) mockInstallerInternal.EXPECT().AddOpenshiftVersion(gomock.Any(), gomock.Any(), gomock.Any()).Return(openshiftVersion, nil) diff --git a/subsystem/kubeapi_test.go b/subsystem/kubeapi_test.go index 683da4cccc0..61e352b20ea 100644 --- a/subsystem/kubeapi_test.go +++ b/subsystem/kubeapi_test.go @@ -18,6 +18,7 @@ import ( "github.com/openshift/assisted-service/client" "github.com/openshift/assisted-service/client/installer" "github.com/openshift/assisted-service/internal/common" + hiveext "github.com/openshift/assisted-service/internal/controller/api/hiveextension/v1beta1" "github.com/openshift/assisted-service/internal/controller/api/v1beta1" "github.com/openshift/assisted-service/internal/controller/controllers" "github.com/openshift/assisted-service/internal/gencrypto" @@ -35,8 +36,10 @@ import ( ) const ( - fakeIgnitionConfigOverride = `{"ignition": {"version": "3.1.0"}, "storage": {"files": [{"path": "/tmp/example", "contents": {"source": "data:text/plain;base64,aGVscGltdHJhcHBlZGluYXN3YWdnZXJzcGVj"}}]}}` - badIgnitionConfigOverride = `bad ignition config` + fakeIgnitionConfigOverride = 
`{"ignition": {"version": "3.1.0"}, "storage": {"files": [{"path": "/tmp/example", "contents": {"source": "data:text/plain;base64,aGVscGltdHJhcHBlZGluYXN3YWdnZXJzcGVj"}}]}}` + badIgnitionConfigOverride = `bad ignition config` + clusterDeploymentName = "test-cluster" + clusterAgentCLusterInstallName = "test-agent-cluster-install" ) var ( @@ -106,8 +109,23 @@ func deployPullSecretResource(ctx context.Context, client k8sclient.Client, name Expect(client.Create(ctx, s)).To(BeNil()) } +func deployAgentClusterInstallCRD(ctx context.Context, client k8sclient.Client, spec *hiveext.AgentClusterInstallSpec) { + deployClusterImageSetCRD(ctx, client, spec.ImageSetRef) + err := client.Create(ctx, &hiveext.AgentClusterInstall{ + TypeMeta: metav1.TypeMeta{ + Kind: "AgentClusterInstall", + APIVersion: "hiveextension/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + }, + Spec: *spec, + }) + Expect(err).To(BeNil()) +} + func deployClusterDeploymentCRD(ctx context.Context, client k8sclient.Client, spec *hivev1.ClusterDeploymentSpec) { - deployClusterImageSetCRD(ctx, client, spec.Provisioning.ImageSetRef) err := client.Create(ctx, &hivev1.ClusterDeployment{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterDeployment", @@ -145,7 +163,7 @@ func addAnnotationToClusterDeployment(ctx context.Context, client k8sclient.Clie }, "30s", "10s").Should(BeNil()) } -func deployClusterImageSetCRD(ctx context.Context, client k8sclient.Client, imageSetRef *hivev1.ClusterImageSetReference) { +func deployClusterImageSetCRD(ctx context.Context, client k8sclient.Client, imageSetRef hivev1.ClusterImageSetReference) { err := client.Create(ctx, &hivev1.ClusterImageSet{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterImageSet", @@ -226,6 +244,13 @@ func getClusterDeploymentCRD(ctx context.Context, client k8sclient.Client, key t return cluster } +func getAgentClusterInstallCRD(ctx context.Context, client k8sclient.Client, key types.NamespacedName) 
*hiveext.AgentClusterInstall { + cluster := &hiveext.AgentClusterInstall{} + err := client.Get(ctx, key, cluster) + Expect(err).To(BeNil()) + return cluster +} + func getInfraEnvCRD(ctx context.Context, client k8sclient.Client, key types.NamespacedName) *v1beta1.InfraEnv { infraEnv := &v1beta1.InfraEnv{} err := client.Get(ctx, key, infraEnv) @@ -282,9 +307,9 @@ func checkAgentCondition(ctx context.Context, hostId string, conditionType condi }, "30s", "10s").Should(Equal(reason)) } -func checkClusterCondition(ctx context.Context, key types.NamespacedName, conditionType hivev1.ClusterDeploymentConditionType, reason string) { +func checkAgentClusterInstallCondition(ctx context.Context, key types.NamespacedName, conditionType string, reason string) { Eventually(func() string { - condition := controllers.FindStatusCondition(getClusterDeploymentCRD(ctx, kubeClient, key).Status.Conditions, conditionType) + condition := controllers.FindStatusCondition(getAgentClusterInstallCRD(ctx, kubeClient, key).Status.Conditions, conditionType) if condition != nil { return condition.Reason } @@ -304,68 +329,60 @@ func checkInfraEnvCondition(ctx context.Context, key types.NamespacedName, condi func getDefaultClusterDeploymentSpec(secretRef *corev1.LocalObjectReference) *hivev1.ClusterDeploymentSpec { return &hivev1.ClusterDeploymentSpec{ - ClusterName: "test-cluster", + ClusterName: clusterDeploymentName, BaseDomain: "hive.example.com", - Provisioning: &hivev1.Provisioning{ - InstallConfigSecretRef: &corev1.LocalObjectReference{Name: "cluster-install-config"}, - ImageSetRef: &hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, - InstallStrategy: &hivev1.InstallStrategy{ - Agent: &agentv1.InstallStrategy{ - Networking: agentv1.Networking{ - MachineNetwork: []agentv1.MachineNetworkEntry{}, - ClusterNetwork: []agentv1.ClusterNetworkEntry{{ - CIDR: "10.128.0.0/14", - HostPrefix: 23, - }}, - ServiceNetwork: []string{"172.30.0.0/16"}, - }, - SSHPublicKey: sshPublicKey, - 
ProvisionRequirements: agentv1.ProvisionRequirements{ - ControlPlaneAgents: 3, - WorkerAgents: 0, - }, - }, - }, - }, Platform: hivev1.Platform{ - AgentBareMetal: &agentv1.BareMetalPlatform{ - APIVIP: "1.2.3.8", - IngressVIP: "1.2.3.9", - }, + AgentBareMetal: &agentv1.BareMetalPlatform{}, }, PullSecretRef: secretRef, + ClusterInstallRef: &hivev1.ClusterInstallLocalReference{ + Group: hiveext.Group, + Version: hiveext.Version, + Kind: "AgentClusterInstall", + Name: clusterAgentCLusterInstallName, + }, } } -func getDefaultClusterDeploymentSNOSpec(secretRef *corev1.LocalObjectReference) *hivev1.ClusterDeploymentSpec { - return &hivev1.ClusterDeploymentSpec{ - ClusterName: "test-cluster-sno", - BaseDomain: "hive.example.com", - Provisioning: &hivev1.Provisioning{ - InstallConfigSecretRef: &corev1.LocalObjectReference{Name: "cluster-install-config"}, - ImageSetRef: &hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, - InstallStrategy: &hivev1.InstallStrategy{ - Agent: &agentv1.InstallStrategy{ - Networking: agentv1.Networking{ - MachineNetwork: []agentv1.MachineNetworkEntry{{CIDR: "1.2.3.0/24"}}, - ClusterNetwork: []agentv1.ClusterNetworkEntry{{ - CIDR: "10.128.0.0/14", - HostPrefix: 23, - }}, - ServiceNetwork: []string{"172.30.0.0/16"}, - }, - SSHPublicKey: sshPublicKey, - ProvisionRequirements: agentv1.ProvisionRequirements{ - ControlPlaneAgents: 1, - WorkerAgents: 0, - }, - }, - }, +func getDefaultAgentClusterInstallSpec() *hiveext.AgentClusterInstallSpec { + return &hiveext.AgentClusterInstallSpec{ + Networking: hiveext.Networking{ + MachineNetwork: []hiveext.MachineNetworkEntry{}, + ClusterNetwork: []hiveext.ClusterNetworkEntry{{ + CIDR: "10.128.0.0/14", + HostPrefix: 23, + }}, + ServiceNetwork: []string{"172.30.0.0/16"}, }, - Platform: hivev1.Platform{ - AgentBareMetal: &agentv1.BareMetalPlatform{}, + SSHPublicKey: sshPublicKey, + ImageSetRef: hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, + ProvisionRequirements: hiveext.ProvisionRequirements{ 
+ ControlPlaneAgents: 3, + WorkerAgents: 0, }, - PullSecretRef: secretRef, + APIVIP: "1.2.3.8", + IngressVIP: "1.2.3.9", + ClusterDeploymentRef: corev1.LocalObjectReference{Name: clusterDeploymentName}, + } +} + +func getDefaultSNOAgentClusterInstallSpec() *hiveext.AgentClusterInstallSpec { + return &hiveext.AgentClusterInstallSpec{ + Networking: hiveext.Networking{ + MachineNetwork: []hiveext.MachineNetworkEntry{{CIDR: "1.2.3.0/24"}}, + ClusterNetwork: []hiveext.ClusterNetworkEntry{{ + CIDR: "10.128.0.0/14", + HostPrefix: 23, + }}, + ServiceNetwork: []string{"172.30.0.0/16"}, + }, + SSHPublicKey: sshPublicKey, + ImageSetRef: hivev1.ClusterImageSetReference{Name: "openshift-v4.8.0"}, + ProvisionRequirements: hiveext.ProvisionRequirements{ + ControlPlaneAgents: 1, + WorkerAgents: 0, + }, + ClusterDeploymentRef: corev1.LocalObjectReference{Name: clusterDeploymentName}, } } @@ -420,6 +437,7 @@ func cleanUP(ctx context.Context, client k8sclient.Client) { Type: corev1.SecretTypeDockerConfigJson, } Expect(client.Delete(ctx, ps)).To(BeNil()) + Expect(client.DeleteAllOf(ctx, &hiveext.AgentClusterInstall{}, k8sclient.InNamespace(Options.Namespace))).To(BeNil()) } func setupNewHost(ctx context.Context, hostname string, clusterID strfmt.UUID) *models.Host { @@ -448,6 +466,8 @@ var _ = Describe("[kube-api]cluster installation", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) key := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -460,6 +480,9 @@ var _ = Describe("[kube-api]cluster installation", func() { host := setupNewHost(ctx, hostname, *cluster.ID) hosts = append(hosts, host) } + for _, host := range hosts { + checkAgentCondition(ctx, host.ID.String(), controllers.ValidatedCondition, 
controllers.ValidationsFailingReason) + } generateFullMeshConnectivity(ctx, "1.2.3.10", hosts...) for _, host := range hosts { hostkey := types.NamespacedName{ @@ -472,13 +495,19 @@ var _ = Describe("[kube-api]cluster installation", func() { return kubeClient.Update(ctx, agent) }, "30s", "10s").Should(BeNil()) } - checkClusterCondition(ctx, key, controllers.ClusterReadyForInstallationCondition, controllers.ClusterAlreadyInstallingReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterAlreadyInstallingReason) }) It("deploy clusterDeployment with agent and update agent", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) key := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -520,6 +549,8 @@ var _ = Describe("[kube-api]cluster installation", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) key := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -587,6 +618,8 @@ var _ = Describe("[kube-api]cluster installation", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) key := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -626,6 
+659,8 @@ var _ = Describe("[kube-api]cluster installation", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) key := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -689,6 +724,8 @@ var _ = Describe("[kube-api]cluster installation", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) key := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -748,6 +785,8 @@ var _ = Describe("[kube-api]cluster installation", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) key := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -826,13 +865,19 @@ var _ = Describe("[kube-api]cluster installation", func() { It("deploy clusterDeployment and infraEnv and verify cluster updates", func() { infraEnvName := "infraenv" secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, 
clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) configureLocalAgentClient(cluster.ID.String()) Expect(cluster.NoProxy).Should(Equal("")) @@ -870,13 +915,19 @@ var _ = Describe("[kube-api]cluster installation", func() { It("deploy clusterDeployment and infraEnv with ignition override", func() { infraEnvName := "infraenv" secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) configureLocalAgentClient(cluster.ID.String()) @@ -900,7 +951,7 @@ var _ = Describe("[kube-api]cluster installation", func() { It("deploy infraEnv before clusterDeployment", func() { infraEnvName := "infraenv" secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := 
getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) infraEnvSpec := getDefaultInfraEnvSpec(secretRef, clusterDeploymentSpec) deployInfraEnvCRD(ctx, kubeClient, infraEnvName, infraEnvSpec) @@ -909,11 +960,17 @@ var _ = Describe("[kube-api]cluster installation", func() { Name: infraEnvName, } deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) configureLocalAgentClient(cluster.ID.String()) @@ -925,13 +982,19 @@ var _ = Describe("[kube-api]cluster installation", func() { It("deploy clusterDeployment and infraEnv and with an invalid ignition override", func() { infraEnvName := "infraenv" secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + 
installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) configureLocalAgentClient(cluster.ID.String()) Expect(cluster.IgnitionConfigOverrides).Should(Equal("")) @@ -953,13 +1016,19 @@ var _ = Describe("[kube-api]cluster installation", func() { It("deploy clusterDeployment with install config override", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) Expect(cluster.InstallConfigOverrides).Should(Equal("")) @@ -977,19 +1046,25 @@ var _ = Describe("[kube-api]cluster installation", func() { It("deploy clusterDeployment with malformed install config override", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) 
deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) cluster := getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) Expect(cluster.InstallConfigOverrides).Should(Equal("")) installConfigOverrides := `{"controlPlane": "malformed json": "Enabled"}}` addAnnotationToClusterDeployment(ctx, kubeClient, clusterKubeName, controllers.InstallConfigOverrides, installConfigOverrides) - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterSpecSyncedCondition, controllers.InputErrorReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterSpecSyncedCondition, controllers.InputErrorReason) cluster = getClusterFromDB(ctx, kubeClient, db, clusterKubeName, waitForReconcileTimeout) Expect(cluster.InstallConfigOverrides).Should(Equal("")) }) @@ -1016,13 +1091,19 @@ var _ = Describe("[kube-api]cluster installation", func() { deployNMStateConfigCRD(ctx, kubeClient, "nmstate1", NMStateLabelName, NMStateLabelValue, nmstateConfigSpec) infraEnvName := "infraenv" secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, 
aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) infraEnvSpec := getDefaultInfraEnvSpec(secretRef, clusterDeploymentSpec) infraEnvSpec.NMStateConfigLabelSelector = metav1.LabelSelector{MatchLabels: map[string]string{NMStateLabelName: NMStateLabelValue}} deployInfraEnvCRD(ctx, kubeClient, infraEnvName, infraEnvSpec) @@ -1051,13 +1132,19 @@ var _ = Describe("[kube-api]cluster installation", func() { deployNMStateConfigCRD(ctx, kubeClient, "nmstate2", NMStateLabelName, NMStateLabelValue, nmstateConfigSpec) infraEnvName := "infraenv" secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKubeName := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) infraEnvSpec := getDefaultInfraEnvSpec(secretRef, clusterDeploymentSpec) infraEnvSpec.NMStateConfigLabelSelector = 
metav1.LabelSelector{MatchLabels: map[string]string{NMStateLabelName: NMStateLabelValue}} deployInfraEnvCRD(ctx, kubeClient, infraEnvName, infraEnvSpec) @@ -1074,14 +1161,13 @@ var _ = Describe("[kube-api]cluster installation", func() { It("SNO deploy clusterDeployment full install and validate MetaData", func() { By("Create cluster") secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - spec := getDefaultClusterDeploymentSNOSpec(secretRef) - - // Add space suffix to SSHPublicKey to validate proper install - sshPublicKeySuffixSpace := fmt.Sprintf("%s ", - spec.Provisioning.InstallStrategy.Agent.SSHPublicKey) - spec.Provisioning.InstallStrategy.Agent.SSHPublicKey = sshPublicKeySuffixSpace - + spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultSNOAgentClusterInstallSpec() + // Add space suffix to SSHPublicKey to validate proper install + sshPublicKeySuffixSpace := fmt.Sprintf("%s ", aciSpec.SSHPublicKey) + aciSpec.SSHPublicKey = sshPublicKeySuffixSpace + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKey := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -1101,7 +1187,11 @@ var _ = Describe("[kube-api]cluster installation", func() { }, "30s", "10s").Should(BeNil()) By("Wait for installing") - checkClusterCondition(ctx, clusterKey, controllers.ClusterInstalledCondition, controllers.InstallationInProgressReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterCompletedCondition, controllers.InstallationInProgressReason) Eventually(func() bool { c := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) @@ -1127,16 +1217,8 @@ var _ = Describe("[kube-api]cluster installation", func() { Expect(err).NotTo(HaveOccurred()) By("Verify Cluster Metadata") - Eventually(func() bool { - return 
getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Spec.Installed - }, "1m", "2s").Should(BeTrue()) - Eventually(func() string { - return getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Status.APIURL - }, "1m", "2s").Should(Equal(fmt.Sprintf("https://api.%s.hive.example.com:6443", spec.ClusterName))) - Eventually(func() string { - return getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Status.WebConsoleURL - }, "1m", "2s").Should(Equal(fmt.Sprintf("https://console-openshift-console.apps.%s.hive.example.com", spec.ClusterName))) - passwordSecretRef := getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Spec.ClusterMetadata.AdminPasswordSecretRef + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterCompletedCondition, controllers.InstalledReason) + passwordSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminPasswordSecretRef Expect(passwordSecretRef).NotTo(BeNil()) passwordkey := types.NamespacedName{ Namespace: Options.Namespace, @@ -1145,7 +1227,7 @@ var _ = Describe("[kube-api]cluster installation", func() { passwordSecret := getSecret(ctx, kubeClient, passwordkey) Expect(passwordSecret.Data["password"]).NotTo(BeNil()) Expect(passwordSecret.Data["username"]).NotTo(BeNil()) - configSecretRef := getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Spec.ClusterMetadata.AdminKubeconfigSecretRef + configSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminKubeconfigSecretRef Expect(passwordSecretRef).NotTo(BeNil()) configkey := types.NamespacedName{ Namespace: Options.Namespace, @@ -1156,10 +1238,13 @@ var _ = Describe("[kube-api]cluster installation", func() { }) It("None SNO deploy clusterDeployment full install and validate MetaData", func() { + Skip("MGMT-6025 day2 test") By("Create cluster") secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) 
+ aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKey := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -1194,7 +1279,11 @@ var _ = Describe("[kube-api]cluster installation", func() { } By("Wait for installing") - checkClusterCondition(ctx, clusterKey, controllers.ClusterInstalledCondition, controllers.InstallationInProgressReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterCompletedCondition, controllers.InstallationInProgressReason) Eventually(func() bool { c := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) for _, h := range c.Hosts { @@ -1225,7 +1314,7 @@ var _ = Describe("[kube-api]cluster installation", func() { Expect(err).NotTo(HaveOccurred()) By("Verify Day 2 Cluster") - checkClusterCondition(ctx, clusterKey, controllers.ClusterInstalledCondition, controllers.InstalledReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterCompletedCondition, controllers.InstalledReason) cluster = getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) Expect(*cluster.Kind).Should(Equal(models.ClusterKindAddHostsCluster)) @@ -1233,7 +1322,7 @@ var _ = Describe("[kube-api]cluster installation", func() { Eventually(func() bool { return getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Spec.Installed }, "2m", "2s").Should(BeTrue()) - passwordSecretRef := getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Spec.ClusterMetadata.AdminPasswordSecretRef + passwordSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminPasswordSecretRef Expect(passwordSecretRef).NotTo(BeNil()) passwordkey := types.NamespacedName{ Namespace: Options.Namespace, @@ -1242,7 +1331,7 @@ var _ = Describe("[kube-api]cluster installation", func() { passwordSecret 
:= getSecret(ctx, kubeClient, passwordkey) Expect(passwordSecret.Data["password"]).NotTo(BeNil()) Expect(passwordSecret.Data["username"]).NotTo(BeNil()) - configSecretRef := getClusterDeploymentCRD(ctx, kubeClient, clusterKey).Spec.ClusterMetadata.AdminKubeconfigSecretRef + configSecretRef := getAgentClusterInstallCRD(ctx, kubeClient, installkey).Spec.ClusterMetadata.AdminKubeconfigSecretRef Expect(passwordSecretRef).NotTo(BeNil()) configkey := types.NamespacedName{ Namespace: Options.Namespace, @@ -1253,10 +1342,13 @@ var _ = Describe("[kube-api]cluster installation", func() { }) It("None SNO deploy clusterDeployment full install and Day 2 new host", func() { + Skip("MGMT-6025 day2 test") By("Create cluster") secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, spec) + aciSpec := getDefaultAgentClusterInstallSpec() + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKey := types.NamespacedName{ Namespace: Options.Namespace, Name: spec.ClusterName, @@ -1284,7 +1376,11 @@ var _ = Describe("[kube-api]cluster installation", func() { } By("Wait for installing") - checkClusterCondition(ctx, clusterKey, controllers.ClusterInstalledCondition, controllers.InstallationInProgressReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterCompletedCondition, controllers.InstallationInProgressReason) Eventually(func() bool { c := getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) for _, h := range c.Hosts { @@ -1311,7 +1407,7 @@ var _ = Describe("[kube-api]cluster installation", func() { Expect(err).NotTo(HaveOccurred()) By("Verify Day 2 Cluster") - checkClusterCondition(ctx, clusterKey, controllers.ClusterInstalledCondition, controllers.InstalledReason) + checkAgentClusterInstallCondition(ctx, installkey, 
controllers.ClusterCompletedCondition, controllers.InstalledReason) cluster = getClusterFromDB(ctx, kubeClient, db, clusterKey, waitForReconcileTimeout) Expect(*cluster.Kind).Should(Equal(models.ClusterKindAddHostsCluster)) @@ -1338,75 +1434,83 @@ var _ = Describe("[kube-api]cluster installation", func() { It("deploy clusterDeployment with invalid machine cidr", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) - clusterDeploymentSpec.Provisioning.InstallStrategy.Agent.Networking.MachineNetwork = []agentv1.MachineNetworkEntry{{CIDR: "1.2.3.5/24"}} + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) - clusterKubeName := types.NamespacedName{ + aciSpec := getDefaultSNOAgentClusterInstallSpec() + aciSpec.Networking.MachineNetwork = []hiveext.MachineNetworkEntry{{CIDR: "1.2.3.5/24"}} + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) + installkey := types.NamespacedName{ Namespace: Options.Namespace, - Name: clusterDeploymentSpec.ClusterName, + Name: clusterAgentCLusterInstallName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) }) It("deploy clusterDeployment without machine cidr", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) - clusterDeploymentSpec.Provisioning.InstallStrategy.Agent.Networking.MachineNetwork = []agentv1.MachineNetworkEntry{} + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) - clusterKubeName := types.NamespacedName{ + aciSpec := getDefaultSNOAgentClusterInstallSpec() + 
aciSpec.Networking.MachineNetwork = []hiveext.MachineNetworkEntry{} + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) + installkey := types.NamespacedName{ Namespace: Options.Namespace, - Name: clusterDeploymentSpec.ClusterName, + Name: clusterAgentCLusterInstallName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterReadyForInstallationCondition, controllers.ClusterNotReadyReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterNotReadyReason) }) It("deploy clusterDeployment with invalid clusterImageSet", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) - clusterDeploymentSpec.Provisioning.ImageSetRef.Name = "invalid" deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) - clusterKubeName := types.NamespacedName{ + aciSpec := getDefaultAgentClusterInstallSpec() + aciSpec.ImageSetRef.Name = "invalid" + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) + installkey := types.NamespacedName{ Namespace: Options.Namespace, - Name: clusterDeploymentSpec.ClusterName, + Name: clusterAgentCLusterInstallName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterSpecSyncedCondition, controllers.BackendErrorReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterSpecSyncedCondition, controllers.BackendErrorReason) }) It("deploy clusterDeployment with missing clusterImageSet", func() { secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) spec := getDefaultClusterDeploymentSpec(secretRef) + deployClusterDeploymentCRD(ctx, kubeClient, spec) - // Create ClusterDeployment - err := kubeClient.Create(ctx, &hivev1.ClusterDeployment{ + aciSpec := getDefaultAgentClusterInstallSpec() + // Create AgentClusterInstall without creating the clusterImageSet + err := kubeClient.Create(ctx, &hiveext.AgentClusterInstall{ TypeMeta: metav1.TypeMeta{ - 
Kind: "ClusterDeployment", - APIVersion: getAPIVersion(), + Kind: "AgentClusterInstall", + APIVersion: "hiveextension/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Namespace: Options.Namespace, - Name: spec.ClusterName, + Name: clusterAgentCLusterInstallName, }, - Spec: *spec, + Spec: *aciSpec, }) Expect(err).To(BeNil()) - - clusterKubeName := types.NamespacedName{ + installkey := types.NamespacedName{ Namespace: Options.Namespace, - Name: spec.ClusterName, + Name: clusterAgentCLusterInstallName, } - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterSpecSyncedCondition, controllers.BackendErrorReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterSpecSyncedCondition, controllers.BackendErrorReason) // Create ClusterImageSet - deployClusterImageSetCRD(ctx, kubeClient, spec.Provisioning.ImageSetRef) - checkClusterCondition(ctx, clusterKubeName, controllers.ClusterSpecSyncedCondition, controllers.SyncedOkReason) + deployClusterImageSetCRD(ctx, kubeClient, aciSpec.ImageSetRef) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterSpecSyncedCondition, controllers.SyncedOkReason) }) - It("deploy clusterDeployment with manifest reference with bad manifest and then fixing it ", func() { + It("deploy agentClusterInstall with manifest reference with bad manifest and then fixing it ", func() { By("Create cluster") secretRef := deployLocalObjectSecretIfNeeded(ctx, kubeClient) - clusterDeploymentSpec := getDefaultClusterDeploymentSNOSpec(secretRef) + clusterDeploymentSpec := getDefaultClusterDeploymentSpec(secretRef) + aciSpec := getDefaultSNOAgentClusterInstallSpec() ref := &corev1.LocalObjectReference{Name: "cluster-install-config"} - clusterDeploymentSpec.Provisioning.ManifestsConfigMapRef = ref + aciSpec.ManifestsConfigMapRef = ref content := `apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: @@ -1419,6 +1523,7 @@ spec: By("Start installation without config map") 
deployClusterDeploymentCRD(ctx, kubeClient, clusterDeploymentSpec) + deployAgentClusterInstallCRD(ctx, kubeClient, aciSpec) clusterKey := types.NamespacedName{ Namespace: Options.Namespace, Name: clusterDeploymentSpec.ClusterName, @@ -1436,8 +1541,12 @@ spec: agent.Spec.Approved = true return kubeClient.Update(ctx, agent) }, "30s", "10s").Should(BeNil()) - checkClusterCondition(ctx, clusterKey, controllers.ClusterReadyForInstallationCondition, controllers.ClusterReadyReason) - checkClusterCondition(ctx, clusterKey, controllers.ClusterSpecSyncedCondition, controllers.BackendErrorReason) + installkey := types.NamespacedName{ + Namespace: Options.Namespace, + Name: clusterAgentCLusterInstallName, + } + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterReadyReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterSpecSyncedCondition, controllers.BackendErrorReason) By("Deploy bad config map") data := map[string]string{"test.yaml": content, "test.dc": "test"} @@ -1446,17 +1555,17 @@ spec: _ = kubeClient.Delete(ctx, cm) }() - checkClusterCondition(ctx, clusterKey, controllers.ClusterReadyForInstallationCondition, controllers.ClusterReadyReason) - checkClusterCondition(ctx, clusterKey, controllers.ClusterSpecSyncedCondition, controllers.InputErrorReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterReadyReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterSpecSyncedCondition, controllers.InputErrorReason) By("Fixing configmap and expecting installation to start") // adding sleep to be sure all reconciles will finish, will test that requeue worked as expected time.Sleep(30 * time.Second) - checkClusterCondition(ctx, clusterKey, controllers.ClusterReadyForInstallationCondition, controllers.ClusterReadyReason) + checkAgentClusterInstallCondition(ctx, installkey, 
controllers.ClusterRequirementsMetCondition, controllers.ClusterReadyReason) data = map[string]string{"test.yaml": content, "test2.yaml": content} deployOrUpdateConfigMap(ctx, kubeClient, ref.Name, data) - checkClusterCondition(ctx, clusterKey, controllers.ClusterReadyForInstallationCondition, controllers.ClusterAlreadyInstallingReason) + checkAgentClusterInstallCondition(ctx, installkey, controllers.ClusterRequirementsMetCondition, controllers.ClusterAlreadyInstallingReason) }) }) diff --git a/subsystem/subsystem_suite_test.go b/subsystem/subsystem_suite_test.go index 2c28f85e7ec..25da17ef54a 100644 --- a/subsystem/subsystem_suite_test.go +++ b/subsystem/subsystem_suite_test.go @@ -16,6 +16,7 @@ import ( . "github.com/onsi/gomega" "github.com/openshift/assisted-service/client" "github.com/openshift/assisted-service/client/versions" + hiveext "github.com/openshift/assisted-service/internal/controller/api/hiveextension/v1beta1" "github.com/openshift/assisted-service/internal/controller/api/v1beta1" "github.com/openshift/assisted-service/pkg/auth" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -76,7 +77,9 @@ func setupKubeClient() { if addErr := hivev1.AddToScheme(scheme.Scheme); addErr != nil { logrus.Fatalf("Fail adding kubernetes hivev1 scheme: %s", addErr) } - + if addErr := hiveext.AddToScheme(scheme.Scheme); addErr != nil { + logrus.Fatalf("Fail adding kubernetes hiveext scheme: %s", addErr) + } if addErr := bmh_v1alpha1.AddToScheme(scheme.Scheme); addErr != nil { logrus.Fatalf("Fail adding kubernetes bmh scheme: %s", addErr) }