diff --git a/apis/features/v1/groupversion_info.go b/apis/features/v1/groupversion_info.go index f976be504b6..c3bc37feb6f 100644 --- a/apis/features/v1/groupversion_info.go +++ b/apis/features/v1/groupversion_info.go @@ -17,7 +17,7 @@ limitations under the License. // +kubebuilder:object:generate=true // +groupName=features.opendatahub.io -// Package v1 contains API Schema definitions for the datasciencecluster v1 API group +// Package v1 contains API Schema definitions for the feature v1 API group package v1 import ( diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go index 69bce1d5435..20a0caa812b 100644 --- a/components/codeflare/codeflare.go +++ b/components/codeflare/codeflare.go @@ -15,8 +15,8 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -67,7 +67,7 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, l enabled := c.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -81,7 +81,7 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context, cli client.Client, l // check if the CodeFlare operator is installed: it should not be installed dependentOperator := CodeflareOperator // overwrite dependent operator if downstream not match upstream - if platform == deploy.SelfManagedRhods || platform == deploy.ManagedRhods { + if platform == cluster.SelfManagedRhods || platform == cluster.ManagedRhods { dependentOperator = RHCodeflareOperator } @@ -109,10 +109,10 @@ func (c *CodeFlare) ReconcileComponent(ctx 
context.Context, cli client.Client, l } l.Info("apply manifests done") // CloudServiceMonitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go index ad41ec3d800..2a8978b682d 100644 --- a/components/dashboard/dashboard.go +++ b/components/dashboard/dashboard.go @@ -22,7 +22,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -62,7 +61,7 @@ func (d *Dashboard) OverrideManifests(platform string) error { return err } // If overlay is defined, update paths - if platform == string(deploy.ManagedRhods) || platform == string(deploy.SelfManagedRhods) { + if platform == string(cluster.ManagedRhods) || platform == string(cluster.SelfManagedRhods) { defaultKustomizePath := "overlays/rhoai" if manifestConfig.SourcePath != "" { defaultKustomizePath = manifestConfig.SourcePath @@ -93,11 +92,11 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, currentComponentExist bool, ) error { var l logr.Logger - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } - if platform == deploy.SelfManagedRhods || platform == deploy.ManagedRhods { + if platform == 
cluster.SelfManagedRhods || platform == cluster.ManagedRhods { l = d.ConfigComponentLogger(logger, ComponentNameSupported, dscispec) } else { l = d.ConfigComponentLogger(logger, ComponentName, dscispec) @@ -128,14 +127,14 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, } // 2. platform specific RBAC - if platform == deploy.OpenDataHub || platform == "" { + if platform == cluster.OpenDataHub || platform == "" { err := cluster.UpdatePodSecurityRolebinding(cli, dscispec.ApplicationsNamespace, "odh-dashboard") if err != nil { return err } } - if platform == deploy.SelfManagedRhods || platform == deploy.ManagedRhods { + if platform == cluster.SelfManagedRhods || platform == cluster.ManagedRhods { err := cluster.UpdatePodSecurityRolebinding(cli, dscispec.ApplicationsNamespace, "rhods-dashboard") if err != nil { return err @@ -153,7 +152,7 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, // common: Deploy odh-dashboard manifests // TODO: check if we can have the same component name odh-dashboard for both, or still keep rhods-dashboard for RHOAI switch platform { - case deploy.SelfManagedRhods, deploy.ManagedRhods: + case cluster.SelfManagedRhods, cluster.ManagedRhods: // anaconda if err := cluster.CreateSecret(cli, "anaconda-ce-access", dscispec.ApplicationsNamespace); err != nil { return fmt.Errorf("failed to create access-secret for anaconda: %w", err) @@ -178,10 +177,10 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, l.Info("apply manifests done") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentNameSupported, dscispec.ApplicationsNamespace, 20, 3); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentNameSupported, dscispec.ApplicationsNamespace, 20, 3); err != 
nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") @@ -221,23 +220,23 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, } } -func (d *Dashboard) deployCRDsForPlatform(cli client.Client, owner metav1.Object, namespace string, platform deploy.Platform) error { +func (d *Dashboard) deployCRDsForPlatform(cli client.Client, owner metav1.Object, namespace string, platform cluster.Platform) error { componentName := ComponentName - if platform == deploy.SelfManagedRhods || platform == deploy.ManagedRhods { + if platform == cluster.SelfManagedRhods || platform == cluster.ManagedRhods { componentName = ComponentNameSupported } // we only deploy CRD, we do not remove CRD return deploy.DeployManifestsFromPath(cli, owner, PathCRDs, namespace, componentName, true) } -func (d *Dashboard) applyRHOAISpecificConfigs(cli client.Client, owner metav1.Object, namespace string, platform deploy.Platform) error { +func (d *Dashboard) applyRHOAISpecificConfigs(cli client.Client, owner metav1.Object, namespace string, platform cluster.Platform) error { enabled := d.ManagementState == operatorv1.Managed // set proper group name dashboardConfig := filepath.Join(PathODHDashboardConfig, "odhdashboardconfig.yaml") - adminGroups := map[deploy.Platform]string{ - deploy.SelfManagedRhods: "rhods-admins", - deploy.ManagedRhods: "dedicated-admins", + adminGroups := map[cluster.Platform]string{ + cluster.SelfManagedRhods: "rhods-admins", + cluster.ManagedRhods: "dedicated-admins", }[platform] if err := common.ReplaceStringsInFile(dashboardConfig, map[string]string{"": adminGroups}); err != nil { @@ -248,7 +247,7 @@ func (d *Dashboard) applyRHOAISpecificConfigs(cli client.Client, owner metav1.Ob } // ISV path := PathISVSM - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { path = PathISVAddOn } if err := deploy.DeployManifestsFromPath(cli, owner, path, namespace, 
ComponentNameSupported, enabled); err != nil { @@ -257,14 +256,14 @@ func (d *Dashboard) applyRHOAISpecificConfigs(cli client.Client, owner metav1.Ob return nil } -func (d *Dashboard) deployConsoleLink(cli client.Client, owner metav1.Object, platform deploy.Platform, namespace, componentName string) error { +func (d *Dashboard) deployConsoleLink(cli client.Client, owner metav1.Object, platform cluster.Platform, namespace, componentName string) error { var manifestsPath, sectionTitle, routeName string switch platform { - case deploy.SelfManagedRhods: + case cluster.SelfManagedRhods: sectionTitle = "OpenShift Self Managed Services" manifestsPath = PathConsoleLinkSupported routeName = componentName - case deploy.ManagedRhods: + case cluster.ManagedRhods: sectionTitle = "OpenShift Managed Services" manifestsPath = PathConsoleLinkSupported routeName = componentName diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go index fc3227a5f0f..9eada443c65 100644 --- a/components/datasciencepipelines/datasciencepipelines.go +++ b/components/datasciencepipelines/datasciencepipelines.go @@ -1,5 +1,5 @@ // Package datasciencepipelines provides utility functions to config Data Science Pipelines: -// Pipeline solution for end to end MLOps workflows that support the Kubeflow Pipelines SDK and Tekton +// Pipeline solution for end to end MLOps workflows that support the Kubeflow Pipelines SDK, Tekton and Argo Workflows. 
// +groupName=datasciencecluster.opendatahub.io package datasciencepipelines @@ -17,9 +17,9 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -90,7 +90,7 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, enabled := d.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -116,7 +116,7 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, // new overlay manifestsPath := filepath.Join(OverlayPath, "rhoai") - if platform == deploy.OpenDataHub || platform == "" { + if platform == cluster.OpenDataHub || platform == "" { manifestsPath = filepath.Join(OverlayPath, "odh") } if err = deploy.DeployManifestsFromPath(cli, owner, manifestsPath, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { @@ -125,11 +125,11 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, l.Info("apply manifests done") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup // only 1 replica should be very quick - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { return fmt.Errorf("deployment 
for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index f918b2b9601..d4c16f39b57 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -18,7 +18,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -108,7 +107,7 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, enabled := k.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -168,10 +167,10 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, } l.WithValues("Path", Path).Info("apply manifests done for odh-model-controller") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoing rules") diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go index 0271abd3ecf..3d18f46cced 100644 --- a/components/kueue/kueue.go +++ b/components/kueue/kueue.go @@ -13,8 +13,8 @@ import ( dsciv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -62,7 +62,7 @@ func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, logge enabled := k.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -86,10 +86,10 @@ func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, logge } l.Info("apply manifests done") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go index 06616e3a9b3..04949ed4619 100644 --- a/components/modelmeshserving/modelmeshserving.go +++ b/components/modelmeshserving/modelmeshserving.go @@ -17,7 +17,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( 
@@ -95,7 +94,7 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, enabled := m.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -150,10 +149,10 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, l.WithValues("Path", DependentPath).Info("apply manifests done for odh-model-controller") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") diff --git a/components/modelregistry/modelregistry.go b/components/modelregistry/modelregistry.go index d76c07a1026..0d72f7947cc 100644 --- a/components/modelregistry/modelregistry.go +++ b/components/modelregistry/modelregistry.go @@ -68,7 +68,7 @@ func (m *ModelRegistry) ReconcileComponent(_ context.Context, cli client.Client, } enabled := m.GetManagementState() == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } diff --git a/components/ray/ray.go b/components/ray/ray.go index e834fe64df0..ea15b560656 100644 --- a/components/ray/ray.go +++ b/components/ray/ray.go @@ -15,8 +15,8 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -66,7 +66,7 @@ func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, logger enabled := r.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -90,10 +90,10 @@ func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, logger } l.Info("apply manifests done") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus won't fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") diff --git a/components/trainingoperator/trainingoperator.go b/components/trainingoperator/trainingoperator.go index bdb9a3f9725..67bc543fd71 100644 --- a/components/trainingoperator/trainingoperator.go +++ b/components/trainingoperator/trainingoperator.go @@ -1,5 +1,6 @@ // Package trainingoperator provides utility functions to config trainingoperator as part of the stack // which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists +// +groupName=datasciencecluster.opendatahub.io package trainingoperator import ( @@ -14,8 +15,8 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" 
"github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -65,7 +66,7 @@ func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Cl enabled := r.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -89,10 +90,10 @@ func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Cl } l.Info("apply manifests done") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus wont fire alerts when it is just startup - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } fmt.Printf("deployment for %s is done, updating monitoring rules\n", ComponentName) diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 92f989bb8e6..31574708ffe 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -14,8 +14,8 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -64,7 +64,7 @@ func (t 
*TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, lo enabled := t.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -89,9 +89,9 @@ func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, lo l.Info("apply manifests done") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go index 2a14d2ef5af..14755f28db4 100644 --- a/components/workbenches/workbenches.go +++ b/components/workbenches/workbenches.go @@ -18,7 +18,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/monitoring" ) var ( @@ -57,7 +56,7 @@ func (w *Workbenches) OverrideManifests(platform string) error { defaultKustomizePath = subcomponent.SourcePath defaultKustomizePathSupported = subcomponent.SourcePath } - if platform == string(deploy.ManagedRhods) || platform == string(deploy.SelfManagedRhods) { + if platform == string(cluster.ManagedRhods) || platform == string(cluster.SelfManagedRhods) { notebookImagesPathSupported = filepath.Join(deploy.DefaultManifestPath, "jupyterhub", defaultKustomizePathSupported) } 
else { notebookImagesPath = filepath.Join(deploy.DefaultManifestPath, DependentComponentName, defaultKustomizePath) @@ -109,7 +108,7 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, // Create rhods-notebooks namespace in managed platforms enabled := w.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -123,7 +122,7 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, return err } } - if platform == deploy.SelfManagedRhods || platform == deploy.ManagedRhods { + if platform == cluster.SelfManagedRhods || platform == cluster.ManagedRhods { // Intentionally leaving the ownership unset for this namespace. // Specifying this label triggers its deletion when the operator is uninstalled. _, err := cluster.CreateNamespace(cli, "rhods-notebooks", cluster.WithLabels(labels.ODH.OwnedNamespace, "true")) @@ -145,7 +144,7 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, // Update image parameters for nbc in downstream if enabled { if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (w.DevFlags == nil || len(w.DevFlags.Manifests) == 0) { - if platform == deploy.ManagedRhods || platform == deploy.SelfManagedRhods { + if platform == cluster.ManagedRhods || platform == cluster.SelfManagedRhods { // for kf-notebook-controller image if err := deploy.ApplyParams(notebookControllerPath, imageParamMap, false); err != nil { return fmt.Errorf("failed to update image %s: %w", notebookControllerPath, err) @@ -159,7 +158,7 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, } var manifestsPath string - if platform == deploy.OpenDataHub || platform == "" { + if platform == cluster.OpenDataHub || platform == "" { // only for ODH after transit to kubeflow 
repo if err = deploy.DeployManifestsFromPath(cli, owner, kfnotebookControllerPath, @@ -179,11 +178,11 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, } l.WithValues("Path", manifestsPath).Info("apply manifests done notebook image") // CloudService Monitoring handling - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { if enabled { // first check if the service is up, so prometheus wont fire alerts when it is just startup // only 1 replica set timeout to 1min - if err := monitoring.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { + if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 1); err != nil { return fmt.Errorf("deployments for %s are not ready to server: %w", ComponentName, err) } l.Info("deployment is done, updating monitoring rules") diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index bf9e1b20a59..498035197ab 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -151,7 +151,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re managementStateChangeTrustedCA = false // Get platform - platform, err := deploy.GetPlatform(r.Client) + platform, err := cluster.GetPlatform(r.Client) if err != nil { r.Log.Error(err, "Failed to determine platform (managed vs self-managed)") @@ -160,7 +160,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re switch req.Name { case "prometheus": // prometheus configmap - if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == deploy.ManagedRhods { + if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhods { r.Log.Info("Monitoring enabled to restart 
deployment", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "updates") if err != nil { @@ -170,7 +170,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil case "addon-managed-odh-parameters": - if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == deploy.ManagedRhods { + if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhods { r.Log.Info("Monitoring enabled when notification updated", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "updates") if err != nil { @@ -180,7 +180,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil case "backup": // revert back to the original prometheus.yml - if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == deploy.ManagedRhods { + if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhods { r.Log.Info("Monitoring enabled to restore back", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "revertbackup") if err != nil { @@ -217,7 +217,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } switch platform { - case deploy.SelfManagedRhods: + case cluster.SelfManagedRhods: err := r.createUserGroup(ctx, instance, "rhods-admins") if err != nil { return reconcile.Result{}, err @@ -229,7 +229,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return reconcile.Result{}, err } } - case deploy.ManagedRhods: + case cluster.ManagedRhods: osdConfigsPath := filepath.Join(deploy.DefaultManifestPath, "osd-configs") err = deploy.DeployManifestsFromPath(r.Client, instance, osdConfigsPath, r.ApplicationsNamespace, "osd", true) if err != nil { diff --git a/controllers/dscinitialization/monitoring.go 
b/controllers/dscinitialization/monitoring.go index ca9b6007bcf..ab36ee13c01 100644 --- a/controllers/dscinitialization/monitoring.go +++ b/controllers/dscinitialization/monitoring.go @@ -77,7 +77,7 @@ func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Con } if initial == "init" { - err := common.UpdatePodSecurityRolebinding(r.Client, []string{"redhat-ods-monitoring"}, dscInit.Spec.Monitoring.Namespace) + err := cluster.UpdatePodSecurityRolebinding(r.Client, dscInit.Spec.Monitoring.Namespace, "redhat-ods-monitoring") if err != nil { return fmt.Errorf("error to update monitoring security rolebinding: %w", err) } @@ -130,7 +130,7 @@ func configureAlertManager(ctx context.Context, dsciInit *dsci.DSCInitialization // r.Log.Info("Success: inject alertmanage-configs.yaml") // special handling for dev-mod - consolelinkDomain, err := common.GetDomain(r.Client, NameConsoleLink, NamespaceConsoleLink) + consolelinkDomain, err := cluster.GetDomain(r.Client) if err != nil { return fmt.Errorf("error getting console route URL : %w", err) } @@ -208,7 +208,7 @@ func configurePrometheus(ctx context.Context, dsciInit *dsci.DSCInitialization, return err } // Update prometheus-config for dashboard, dsp and workbench - consolelinkDomain, err := common.GetDomain(r.Client, NameConsoleLink, NamespaceConsoleLink) + consolelinkDomain, err := cluster.GetDomain(r.Client) if err != nil { return fmt.Errorf("error getting console route URL : %w", err) } diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index 5368efa5523..f585d11d3ec 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -20,6 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) @@ -190,11 +191,11 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte } func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context.Context, name string, dscInit *dsci.DSCInitialization) error { - platform, err := deploy.GetPlatform(r.Client) + platform, err := cluster.GetPlatform(r.Client) if err != nil { return err } - if platform == deploy.ManagedRhods || platform == deploy.SelfManagedRhods { + if platform == cluster.ManagedRhods || platform == cluster.SelfManagedRhods { // Deploy networkpolicy for operator namespace err = deploy.DeployManifestsFromPath(r.Client, dscInit, networkpolicyPath+"/operator", "redhat-ods-operator", "networkpolicy", true) if err != nil { diff --git a/docs/api-overview.md b/docs/api-overview.md index fd1d4432667..038e692f8ad 100644 --- a/docs/api-overview.md +++ b/docs/api-overview.md @@ -126,7 +126,7 @@ _Appears in:_ ## datasciencecluster.opendatahub.io/datasciencepipelines Package datasciencepipelines provides utility functions to config Data Science Pipelines: -Pipeline solution for end to end MLOps workflows that support the Kubeflow Pipelines SDK and Tekton +Pipeline solution for end to end MLOps workflows that support the Kubeflow Pipelines SDK, Tekton and Argo Workflows. @@ -278,6 +278,30 @@ _Appears in:_ +## datasciencecluster.opendatahub.io/trainingoperator + +Package trainingoperator provides utility functions to config trainingoperator as part of the stack +which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists + + + +#### TrainingOperator + + + +TrainingOperator struct holds the configuration for the TrainingOperator component. 
+ + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `Component` _[Component](#component)_ | | | | + + + ## datasciencecluster.opendatahub.io/trustyai Package trustyai provides utility functions to config TrustyAI, a bias/fairness and explainability toolkit diff --git a/main.go b/main.go index b6402dfc1a2..38832d25095 100644 --- a/main.go +++ b/main.go @@ -56,8 +56,8 @@ import ( dscicontr "github.com/opendatahub-io/opendatahub-operator/v2/controllers/dscinitialization" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/secretgenerator" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -201,7 +201,7 @@ func main() { //nolint:funlen os.Exit(1) } // Get operator platform - platform, err := deploy.GetPlatform(setupClient) + platform, err := cluster.GetPlatform(setupClient) if err != nil { setupLog.Error(err, "error getting platform") os.Exit(1) diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go index b96a9a67b40..457aa7504fa 100644 --- a/pkg/cluster/cluster_config.go +++ b/pkg/cluster/cluster_config.go @@ -5,17 +5,21 @@ import ( "errors" "fmt" "os" + "strings" ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" ) // +kubebuilder:rbac:groups="config.openshift.io",resources=ingresses,verbs=get func GetDomain(c client.Client) (string, error) { ingress := &unstructured.Unstructured{} - ingress.SetGroupVersionKind(OpenshiftIngressGVK) + 
ingress.SetGroupVersionKind(gvk.OpenshiftIngress) if err := c.Get(context.TODO(), client.ObjectKey{ Namespace: "", @@ -54,3 +58,65 @@ func GetClusterServiceVersion(ctx context.Context, c client.Client, watchNameSpa return nil, nil } + +type Platform string + +// isSelfManaged checks presence of ClusterServiceVersions: +// when CSV displayname contains OpenDataHub, return 'OpenDataHub,nil' => high priority +// when CSV displayname contains SelfManagedRhods, return 'SelfManagedRhods,nil' +// when in dev mode and could not find CSV (deploy by olm), return "", nil +// otherwise return "",err. +func isSelfManaged(cli client.Client) (Platform, error) { + clusterCsvs := &ofapi.ClusterServiceVersionList{} + err := cli.List(context.TODO(), clusterCsvs) + if err != nil { + return "", err + } else { //nolint:golint,revive // Readability on else + for _, csv := range clusterCsvs.Items { + if strings.Contains(csv.Spec.DisplayName, string(OpenDataHub)) { + return OpenDataHub, nil + } + if strings.Contains(csv.Spec.DisplayName, string(SelfManagedRhods)) { + return SelfManagedRhods, nil + } + } + } + + return Unknown, nil +} + +// isManagedRHODS checks if CRD add-on exists and contains string ManagedRhods. 
+func isManagedRHODS(cli client.Client) (Platform, error) { + catalogSourceCRD := &apiextv1.CustomResourceDefinition{} + + err := cli.Get(context.TODO(), client.ObjectKey{Name: "catalogsources.operators.coreos.com"}, catalogSourceCRD) + if err != nil { + return "", client.IgnoreNotFound(err) + } + expectedCatlogSource := &ofapi.CatalogSourceList{} + err = cli.List(context.TODO(), expectedCatlogSource) + if err != nil { + return Unknown, err + } + if len(expectedCatlogSource.Items) > 0 { + for _, cs := range expectedCatlogSource.Items { + if cs.Name == string(ManagedRhods) { + return ManagedRhods, nil + } + } + } + + return "", nil +} + +func GetPlatform(cli client.Client) (Platform, error) { + // First check if its addon installation to return 'ManagedRhods, nil' + if platform, err := isManagedRHODS(cli); err != nil { + return Unknown, err + } else if platform == ManagedRhods { + return ManagedRhods, nil + } + + // check and return whether ODH or self-managed platform + return isSelfManaged(cli) +} diff --git a/pkg/cluster/const.go b/pkg/cluster/const.go index de340c42d4c..86772e39579 100644 --- a/pkg/cluster/const.go +++ b/pkg/cluster/const.go @@ -1,23 +1,12 @@ package cluster -import "k8s.io/apimachinery/pkg/runtime/schema" - -var ( - KnativeServingGVK = schema.GroupVersionKind{ - Group: "operator.knative.dev", - Version: "v1beta1", - Kind: "KnativeServing", - } - - OpenshiftIngressGVK = schema.GroupVersionKind{ - Group: "config.openshift.io", - Version: "v1", - Kind: "Ingress", - } - - ServiceMeshControlPlaneGVK = schema.GroupVersionKind{ - Group: "maistra.io", - Version: "v2", - Kind: "ServiceMeshControlPlane", - } +const ( + // ManagedRhods defines expected addon catalogsource. + ManagedRhods Platform = "addon-managed-odh-catalog" + // SelfManagedRhods defines display name in csv. + SelfManagedRhods Platform = "Red Hat OpenShift Data Science" + // OpenDataHub defines display name in csv. 
+ OpenDataHub Platform = "Open Data Hub Operator" + // Unknown indicates that operator is not deployed using OLM. + Unknown Platform = "" ) diff --git a/pkg/cluster/doc.go b/pkg/cluster/doc.go new file mode 100644 index 00000000000..86bb06befb3 --- /dev/null +++ b/pkg/cluster/doc.go @@ -0,0 +1,7 @@ +// Package cluster contains utility functions used to operate on cluster resources. +// +// This includes getting cluster domain, operator namespace and CSV, +// defining different GVK being used in the project, +// config metadata on resources, +// managing resources like rolebinding, secret and configmap. +package cluster diff --git a/pkg/cluster/gvk/gvk.go b/pkg/cluster/gvk/gvk.go new file mode 100644 index 00000000000..3a4bad2c363 --- /dev/null +++ b/pkg/cluster/gvk/gvk.go @@ -0,0 +1,37 @@ +package gvk + +import "k8s.io/apimachinery/pkg/runtime/schema" + +var ( + KnativeServing = schema.GroupVersionKind{ + Group: "operator.knative.dev", + Version: "v1beta1", + Kind: "KnativeServing", + } + + OpenshiftIngress = schema.GroupVersionKind{ + Group: "config.openshift.io", + Version: "v1", + Kind: "Ingress", + } + + ServiceMeshControlPlane = schema.GroupVersionKind{ + Group: "maistra.io", + Version: "v2", + Kind: "ServiceMeshControlPlane", + } + + OdhApplication = schema.GroupVersionKind{ + Group: "dashboard.opendatahub.io", + Version: "v1", + Kind: "OdhApplication", + } + OdhDocument = schema.GroupVersionKind{ + Group: "dashboard.opendatahub.io", + Version: "v1", Kind: "OdhDocument", + } + OdhQuickStart = schema.GroupVersionKind{ + Group: "console.openshift.io", + Version: "v1", Kind: "OdhQuickStart", + } +) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index a4517c2c978..736d1ad83b2 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -3,16 +3,21 @@ package cluster import ( "context" "fmt" + "time" + v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" authv1 "k8s.io/api/rbac/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) // UpdatePodSecurityRolebinding update default rolebinding which is created in applications namespace by manifests -// being used by different components. +// being used by different components and SRE monitoring. func UpdatePodSecurityRolebinding(cli client.Client, namespace string, serviceAccountsList ...string) error { foundRoleBinding := &authv1.RoleBinding{} if err := cli.Get(context.TODO(), client.ObjectKey{Name: namespace, Namespace: namespace}, foundRoleBinding); err != nil { @@ -146,3 +151,26 @@ func CreateNamespace(cli client.Client, namespace string, metaOptions ...MetaOpt return desiredNamespace, client.IgnoreAlreadyExists(createErr) } + +// WaitForDeploymentAvailable to check if component deployment from 'namespace' is ready within 'timeout' before apply prometheus rules for the component. 
+func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentName string, namespace string, interval int, timeout int) error { + resourceInterval := time.Duration(interval) * time.Second + resourceTimeout := time.Duration(timeout) * time.Minute + + return wait.PollUntilContextTimeout(ctx, resourceInterval, resourceTimeout, true, func(ctx context.Context) (bool, error) { + componentDeploymentList := &v1.DeploymentList{} + err := c.List(ctx, componentDeploymentList, client.InNamespace(namespace), client.HasLabels{labels.ODH.Component(componentName)}) + if err != nil { + return false, fmt.Errorf("error fetching list of deployments: %w", err) + } + + fmt.Printf("waiting for %d deployment to be ready for %s\n", len(componentDeploymentList.Items), componentName) + for _, deployment := range componentDeploymentList.Items { + if deployment.Status.ReadyReplicas != deployment.Status.Replicas { + return false, nil + } + } + + return true, nil + }) +} diff --git a/pkg/common/common.go b/pkg/common/common.go index c7edbc60f27..c948c306c80 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -15,10 +15,10 @@ limitations under the License. */ // Package common contains utility functions used by different components +// for cluster related common operations, refer to package cluster package common import ( - "context" "crypto/sha256" b64 "encoding/base64" "fmt" @@ -27,48 +27,10 @@ import ( "strings" "github.com/go-logr/logr" - routev1 "github.com/openshift/api/route/v1" "go.uber.org/zap/zapcore" - authv1 "k8s.io/api/rbac/v1" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -// UpdatePodSecurityRolebinding update default rolebinding which is created in namespace by manifests -// being used by different components and sre monitoring. 
-func UpdatePodSecurityRolebinding(cli client.Client, serviceAccountsList []string, namespace string) error { - foundRoleBinding := &authv1.RoleBinding{} - err := cli.Get(context.TODO(), client.ObjectKey{Name: namespace, Namespace: namespace}, foundRoleBinding) - if err != nil { - return err - } - - for _, sa := range serviceAccountsList { - // Append serviceAccount if not added already - if !subjectExistInRoleBinding(foundRoleBinding.Subjects, sa, namespace) { - foundRoleBinding.Subjects = append(foundRoleBinding.Subjects, authv1.Subject{ - Kind: authv1.ServiceAccountKind, - Name: sa, - Namespace: namespace, - }) - } - } - - return cli.Update(context.TODO(), foundRoleBinding) -} - -// Internal function used by UpdatePodSecurityRolebinding() -// Return whether Rolebinding matching service account and namespace exists or not. -func subjectExistInRoleBinding(subjectList []authv1.Subject, serviceAccountName, namespace string) bool { - for _, subject := range subjectList { - if subject.Name == serviceAccountName && subject.Namespace == namespace { - return true - } - } - - return false -} - // ReplaceStringsInFile replaces variable with value in manifests during runtime. func ReplaceStringsInFile(fileName string, replacements map[string]string) error { // Read the contents of the file @@ -159,17 +121,6 @@ func GetMonitoringData(data string) (string, error) { return encodedData, nil } -// Use openshift-console namespace to get host domain. -func GetDomain(cli client.Client, name string, namespace string) (string, error) { - consoleRoute := &routev1.Route{} - if err := cli.Get(context.TODO(), client.ObjectKey{Name: name, Namespace: namespace}, consoleRoute); err != nil { - return "", fmt.Errorf("error getting %s route URL: %w", name, err) - } - domainIndex := strings.Index(consoleRoute.Spec.Host, ".") - - return consoleRoute.Spec.Host[domainIndex+1:], nil -} - // to use different mode for logging, e.g. 
development, production // when not set mode it falls to "default" which is used by startup main.go. func ConfigLoggers(mode string) logr.Logger { diff --git a/pkg/deploy/deploy.go b/pkg/deploy/deploy.go index d00747168a7..00c823484f4 100644 --- a/pkg/deploy/deploy.go +++ b/pkg/deploy/deploy.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package deploy +// Package deploy provides utility functions used by each component to deploy manifests to the cluster. package deploy import ( @@ -449,17 +449,6 @@ func removeResourcesFromDeployment(u *unstructured.Unstructured) error { return nil } -// GetSubscription checks if a Subscription for the operator exists in the given namespace. -// if exist, return object; otherwise, return error. -func GetSubscription(cli client.Client, namespace string, name string) (*ofapiv1alpha1.Subscription, error) { - sub := &ofapiv1alpha1.Subscription{} - if err := cli.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: name}, sub); err != nil { - // real error or 'not found' both return here - return nil, err - } - return sub, nil -} - func ClusterSubscriptionExists(cli client.Client, name string) (bool, error) { subscriptionList := &ofapiv1alpha1.SubscriptionList{} if err := cli.List(context.TODO(), subscriptionList); err != nil { @@ -474,21 +463,6 @@ func ClusterSubscriptionExists(cli client.Client, name string) (bool, error) { return false, nil } -// DeleteExistingSubscription deletes given Subscription if it exists -// Do not error if the Subscription does not exist. 
-func DeleteExistingSubscription(cli client.Client, operatorNs string, subsName string) error { - sub, err := GetSubscription(cli, operatorNs, subsName) - if err != nil { - return client.IgnoreNotFound(err) - } - - if err := cli.Delete(context.TODO(), sub); client.IgnoreNotFound(err) != nil { - return fmt.Errorf("error deleting subscription %s: %w", sub.Name, err) - } - - return nil -} - // OperatorExists checks if an Operator with 'operatorPrefix' is installed. // Return true if found it, false if not. // if we need to check exact version of the operator installed, can append vX.Y.Z later. diff --git a/pkg/deploy/setup.go b/pkg/deploy/setup.go deleted file mode 100644 index 248f0bce61e..00000000000 --- a/pkg/deploy/setup.go +++ /dev/null @@ -1,83 +0,0 @@ -package deploy - -import ( - "context" - "strings" - - ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - // ManagedRhods defines expected addon catalogsource. - ManagedRhods Platform = "addon-managed-odh-catalog" - // SelfManagedRhods defines display name in csv. - SelfManagedRhods Platform = "Red Hat OpenShift Data Science" - // OpenDataHub defines display name in csv. - OpenDataHub Platform = "Open Data Hub Operator" - // Unknown indicates that operator is not deployed using OLM. - Unknown Platform = "" -) - -type Platform string - -// isSelfManaged checks presence of ClusterServiceVersions: -// when CSV displayname contains OpenDataHub, return 'OpenDataHub,nil' => high priority -// when CSV displayname contains SelfManagedRhods, return 'SelfManagedRhods,nil' -// when in dev mode and could not find CSV (deploy by olm), return "", nil -// otherwise return "",err. 
-func isSelfManaged(cli client.Client) (Platform, error) { - clusterCsvs := &ofapi.ClusterServiceVersionList{} - err := cli.List(context.TODO(), clusterCsvs) - if err != nil { - return "", err - } else { //nolint:golint,revive // Readability on else - for _, csv := range clusterCsvs.Items { - if strings.Contains(csv.Spec.DisplayName, string(OpenDataHub)) { - return OpenDataHub, nil - } - if strings.Contains(csv.Spec.DisplayName, string(SelfManagedRhods)) { - return SelfManagedRhods, nil - } - } - } - - return Unknown, nil -} - -// isManagedRHODS checks if CRD add-on exists and contains string ManagedRhods. -func isManagedRHODS(cli client.Client) (Platform, error) { - catalogSourceCRD := &apiextv1.CustomResourceDefinition{} - - err := cli.Get(context.TODO(), client.ObjectKey{Name: "catalogsources.operators.coreos.com"}, catalogSourceCRD) - if err != nil { - return "", client.IgnoreNotFound(err) - } - expectedCatlogSource := &ofapi.CatalogSourceList{} - err = cli.List(context.TODO(), expectedCatlogSource) - if err != nil { - return Unknown, err - } - if len(expectedCatlogSource.Items) > 0 { - for _, cs := range expectedCatlogSource.Items { - if cs.Name == string(ManagedRhods) { - return ManagedRhods, nil - } - } - } - - return "", nil -} - -func GetPlatform(cli client.Client) (Platform, error) { - // First check if its addon installation to return 'ManagedRhods, nil' - if platform, err := isManagedRHODS(cli); err != nil { - return Unknown, err - } else if platform == ManagedRhods { - return ManagedRhods, nil - } - - // check and return whether ODH or self-managed platform - return isSelfManaged(cli) -} diff --git a/pkg/feature/serverless/conditions.go b/pkg/feature/serverless/conditions.go index 54f2498331a..86f0561e812 100644 --- a/pkg/feature/serverless/conditions.go +++ b/pkg/feature/serverless/conditions.go @@ -7,7 +7,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/controller-runtime/pkg/client" - 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" ) @@ -17,7 +17,7 @@ const ( func EnsureServerlessAbsent(f *feature.Feature) error { list := &unstructured.UnstructuredList{} - list.SetGroupVersionKind(cluster.KnativeServingGVK) + list.SetGroupVersionKind(gvk.KnativeServing) if err := f.Client.List(context.TODO(), list, client.InNamespace("")); err != nil { return fmt.Errorf("failed to list KnativeServings: %w", err) @@ -53,4 +53,4 @@ func EnsureServerlessOperatorInstalled(f *feature.Feature) error { return nil } -var EnsureServerlessServingDeployed = feature.WaitForResourceToBeCreated(KnativeServingNamespace, cluster.KnativeServingGVK) +var EnsureServerlessServingDeployed = feature.WaitForResourceToBeCreated(KnativeServingNamespace, gvk.KnativeServing) diff --git a/pkg/feature/servicemesh/cleanup.go b/pkg/feature/servicemesh/cleanup.go index 960df40b289..e35bebf5be1 100644 --- a/pkg/feature/servicemesh/cleanup.go +++ b/pkg/feature/servicemesh/cleanup.go @@ -7,7 +7,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" ) @@ -16,7 +16,7 @@ func RemoveExtensionProvider(f *feature.Feature) error { mesh := f.Spec.ControlPlane smcp := &unstructured.Unstructured{} - smcp.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + smcp.SetGroupVersionKind(gvk.ServiceMeshControlPlane) if err := f.Client.Get(context.TODO(), client.ObjectKey{ Namespace: mesh.Namespace, diff --git a/pkg/feature/servicemesh/conditions.go b/pkg/feature/servicemesh/conditions.go index 23e3f7bae06..9f8060c0ef0 100644 --- a/pkg/feature/servicemesh/conditions.go +++ b/pkg/feature/servicemesh/conditions.go 
@@ -12,6 +12,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" ) @@ -74,7 +75,7 @@ func WaitForControlPlaneToBeReady(f *feature.Feature) error { func CheckControlPlaneComponentReadiness(c client.Client, smcpName, smcpNs string) (bool, error) { smcpObj := &unstructured.Unstructured{} - smcpObj.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + smcpObj.SetGroupVersionKind(gvk.ServiceMeshControlPlane) err := c.Get(context.TODO(), client.ObjectKey{ Namespace: smcpNs, Name: smcpName, diff --git a/pkg/gvk/gvk.go b/pkg/gvk/gvk.go deleted file mode 100644 index f4a261efbe5..00000000000 --- a/pkg/gvk/gvk.go +++ /dev/null @@ -1,9 +0,0 @@ -package gvk - -import "k8s.io/apimachinery/pkg/runtime/schema" - -var ( - OdhApplication = schema.GroupVersionKind{Group: "dashboard.opendatahub.io", Version: "v1", Kind: "OdhApplication"} - OdhDocument = schema.GroupVersionKind{Group: "dashboard.opendatahub.io", Version: "v1", Kind: "OdhDocument"} - OdhQuickStart = schema.GroupVersionKind{Group: "console.openshift.io", Version: "v1", Kind: "OdhQuickStart"} -) diff --git a/pkg/monitoring/monitoring.go b/pkg/monitoring/monitoring.go deleted file mode 100644 index c377a24ed95..00000000000 --- a/pkg/monitoring/monitoring.go +++ /dev/null @@ -1,36 +0,0 @@ -package monitoring - -import ( - "context" - "fmt" - "time" - - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" -) - -// WaitForDeploymentAvailable to check if component deployment from 'namespace' is ready within 'timeout' before apply prometheus rules for the component. 
-func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentName string, namespace string, interval int, timeout int) error { - resourceInterval := time.Duration(interval) * time.Second - resourceTimeout := time.Duration(timeout) * time.Minute - - return wait.PollUntilContextTimeout(ctx, resourceInterval, resourceTimeout, true, func(ctx context.Context) (bool, error) { - componentDeploymentList := &v1.DeploymentList{} - err := c.List(ctx, componentDeploymentList, client.InNamespace(namespace), client.HasLabels{labels.ODH.Component(componentName)}) - if err != nil { - return false, fmt.Errorf("error fetching list of deployments: %w", err) - } - - fmt.Printf("waiting for %d deployment to be ready for %s\n", len(componentDeploymentList.Items), componentName) - for _, deployment := range componentDeploymentList.Items { - if deployment.Status.ReadyReplicas != deployment.Status.Replicas { - return false, nil - } - } - - return true, nil - }) -} diff --git a/pkg/trustedcabundle/trustedcabundle.go b/pkg/trustedcabundle/trustedcabundle.go index 0d597512ac0..74558278f7a 100644 --- a/pkg/trustedcabundle/trustedcabundle.go +++ b/pkg/trustedcabundle/trustedcabundle.go @@ -1,3 +1,4 @@ +// Package trustedcabundle provides utility functions to create and check trusted CA bundle configmap from DSCI CRD package trustedcabundle import ( diff --git a/pkg/upgrade/uninstallation.go b/pkg/upgrade/uninstallation.go index 33dde932eda..e409e4966e8 100644 --- a/pkg/upgrade/uninstallation.go +++ b/pkg/upgrade/uninstallation.go @@ -6,13 +6,13 @@ import ( "time" "github.com/hashicorp/go-multierror" + ofapiv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) @@ -25,7 +25,7 @@ const ( // OperatorUninstall deletes all the externally generated resources. This includes monitoring resources and applications // installed by KfDef. func OperatorUninstall(ctx context.Context, cli client.Client) error { - platform, err := deploy.GetPlatform(cli) + platform, err := cluster.GetPlatform(cli) if err != nil { return err } @@ -77,12 +77,12 @@ func OperatorUninstall(ctx context.Context, cli client.Client) error { fmt.Printf("Removing operator subscription which in turn will remove installplan\n") subsName := "opendatahub-operator" - if platform == deploy.SelfManagedRhods { + if platform == cluster.SelfManagedRhods { subsName = "rhods-operator" - } else if platform == deploy.ManagedRhods { + } else if platform == cluster.ManagedRhods { subsName = "addon-managed-odh" } - if err := deploy.DeleteExistingSubscription(cli, operatorNs, subsName); err != nil { + if err := DeleteExistingSubscription(cli, operatorNs, subsName); err != nil { return err } @@ -162,3 +162,29 @@ func removeCSV(ctx context.Context, c client.Client) error { fmt.Printf("No clusterserviceversion for the operator found.\n") return nil } + +// DeleteExistingSubscription deletes given Subscription if it exists +// Do not error if the Subscription does not exist. +func DeleteExistingSubscription(cli client.Client, operatorNs string, subsName string) error { + sub, err := getSubscription(cli, operatorNs, subsName) + if err != nil { + return client.IgnoreNotFound(err) + } + + if err := cli.Delete(context.TODO(), sub); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("error deleting subscription %s: %w", sub.Name, err) + } + + return nil +} + +// GetSubscription checks if a Subscription for the operator exists in the given namespace. +// if exist, return object; otherwise, return error. 
+func getSubscription(cli client.Client, namespace string, name string) (*ofapiv1alpha1.Subscription, error) { + sub := &ofapiv1alpha1.Subscription{} + if err := cli.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: name}, sub); err != nil { + // real error or 'not found' both return here + return nil, err + } + return sub, nil +} diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index e4bed22ff76..4609c2dde75 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -1,3 +1,5 @@ +// Package upgrade provides functions to upgrade ODH from v1 to v2 and various v2 versions. +// It contains both the logic to upgrade the ODH components and the logic to clean up the deprecated resources. package upgrade import ( @@ -41,8 +43,7 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) @@ -123,7 +124,7 @@ func CreateDefaultDSC(ctx context.Context, cli client.Client) error { // createDefaultDSCI creates a default instance of DSCI // If there exists an instance already, it patches the DSCISpec with default values // Note: DSCI CR modifcations are not supported, as it is the initial prereq setting for the components.
-func CreateDefaultDSCI(cli client.Client, _ deploy.Platform, appNamespace, monNamespace string) error { +func CreateDefaultDSCI(cli client.Client, _ cluster.Platform, appNamespace, monNamespace string) error { defaultDsciSpec := &dsci.DSCInitializationSpec{ ApplicationsNamespace: appNamespace, Monitoring: dsci.Monitoring{ @@ -177,9 +178,9 @@ func CreateDefaultDSCI(cli client.Client, _ deploy.Platform, appNamespace, monNa return nil } -func UpdateFromLegacyVersion(cli client.Client, platform deploy.Platform, appNS string, montNamespace string) error { +func UpdateFromLegacyVersion(cli client.Client, platform cluster.Platform, appNS string, montNamespace string) error { // If platform is Managed, remove Kfdefs and create default dsc - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { fmt.Println("starting deletion of Deployment in managed cluster") if err := deleteResource(cli, appNS, "deployment"); err != nil { return err @@ -213,7 +214,7 @@ func UpdateFromLegacyVersion(cli client.Client, platform deploy.Platform, appNS return RemoveKfDefInstances(context.TODO(), cli) } - if platform == deploy.SelfManagedRhods { + if platform == cluster.SelfManagedRhods { // remove label created by previous v2 release which is problematic for Managed cluster fmt.Println("removing labels on Operator Namespace") operatorNamespace, err := cluster.GetOperatorNamespace() @@ -294,10 +295,10 @@ func getDashboardWatsonResources(ns string) []ResourceSpec { } // TODO: remove function once we have a generic solution across all components. 
-func CleanupExistingResource(ctx context.Context, cli client.Client, platform deploy.Platform, dscApplicationsNamespace, dscMonitoringNamespace string) error { +func CleanupExistingResource(ctx context.Context, cli client.Client, platform cluster.Platform, dscApplicationsNamespace, dscMonitoringNamespace string) error { var multiErr *multierror.Error // Special Handling of cleanup of deprecated model monitoring stack - if platform == deploy.ManagedRhods { + if platform == cluster.ManagedRhods { deprecatedDeployments := []string{"rhods-prometheus-operator"} multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, dscMonitoringNamespace, deprecatedDeployments, &appsv1.DeploymentList{})) diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index 725401bc93d..4d7ce8a9dcd 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -13,7 +13,7 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" @@ -286,7 +286,7 @@ func createServiceMeshControlPlane(name, namespace string) { } func createSMCPInCluster(smcpObj *unstructured.Unstructured, namespace string) error { - smcpObj.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + smcpObj.SetGroupVersionKind(gvk.ServiceMeshControlPlane) smcpObj.SetNamespace(namespace) if err := envTestClient.Create(context.TODO(), smcpObj); err != nil { return err @@ -323,7 +323,7 @@ func createSMCPInCluster(smcpObj 
*unstructured.Unstructured, namespace string) e func getServiceMeshControlPlane(namespace, name string) (*unstructured.Unstructured, error) { smcpObj := &unstructured.Unstructured{} - smcpObj.SetGroupVersionKind(cluster.ServiceMeshControlPlaneGVK) + smcpObj.SetGroupVersionKind(gvk.ServiceMeshControlPlane) err := envTestClient.Get(context.TODO(), client.ObjectKey{ Namespace: namespace,