diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 000000000..3015475c7 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,24 @@ +name: Build Documentation +on: + push: + branches: + - '**' + tags-ignore: + - '**' +jobs: + deploy-docs: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2.3.1 + + - name: Install and Build + run: | + bash build/bin/build-docs.sh + + - name: Deploy + uses: JamesIves/github-pages-deploy-action@4.1.7 + if: github.ref == 'refs/heads/main' + with: + branch: gh-pages + folder: site diff --git a/.github/workflows/update-internal-repo.yaml b/.github/workflows/update-internal-repo.yaml new file mode 100644 index 000000000..cd64df7bd --- /dev/null +++ b/.github/workflows/update-internal-repo.yaml @@ -0,0 +1,53 @@ +name: Update Internal GHE Repo +on: + push: + branches: + - '**' + tags-ignore: + - '**' + +env: + GITHUB_BRANCH: ${{ github.ref_name }} + REMOTE_REPO: automation-paas-cd-pipeline/mas-gitops + +jobs: + update-repo: + runs-on: ubuntu-latest + steps: + - name: Checkout local repo + uses: actions/checkout@v4 + with: + path: source + + - name: Checkout remote repo + id: checkout_remote + continue-on-error: true + uses: actions/checkout@v4 + with: + github-server-url: https://github.ibm.com + repository: ${{ env.REMOTE_REPO }} + ref: ${{ env.GITHUB_BRANCH }} + token: ${{ secrets.ACTIONS_GITHUB_TOKEN }} + path: target + + - name: Copy over files + if: ${{ steps.checkout_remote.outcome == 'success' }} + run: $GITHUB_WORKSPACE/source/build/bin/copy-gitops.sh -s $GITHUB_WORKSPACE/source -t $GITHUB_WORKSPACE/target + + - name: Commit changes to remote repo + if: ${{ steps.checkout_remote.outcome == 'success' }} + run: | + cd $GITHUB_WORKSPACE/target + git config user.name "GitHub Actions Bot" + git config user.email "<>" + + echo "git: Adding all files in $GITHUB_WORKSPACE/target directory" + export FILES_ADDED=$(git add -v . 
| wc -l | xargs) + echo "git: Added ${FILES_ADDED} files" + if [ "$FILES_ADDED" != "0" ]; then + echo "git: Committing files" + git commit -m "Synch commit between ibm-mas/gitops and $REMOTE_REPO" + fi + + echo "Push changes to ${{ env.GITHUB_BRANCH }} branch of https://github.ibm.com/${{ env.REMOTE_REPO }}" + git push origin ${{ env.GITHUB_BRANCH }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..9989ccead --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +root-applications/test/Chart.yaml +root-applications/test/README.md +root-applications/test/values.yaml +root-applications/test/templates/130-configs-app.yaml +__pycache__/main.cpython-39.pyc +site diff --git a/README.md b/README.md index 7aa362c3c..f481e1deb 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,10 @@ -# gitops -A GitOps approach to managing Maximo Application Suite +Maximo Application Suite GitOps +=============================================================================== + +A GitOps approach to managing Maximo Application Suite. 
+ +Documentation +------------------------------------------------------------------------------- +[https://ibm-mas.github.io/gitops/](https://ibm-mas.github.io/gitops/) + +[https://github.com/ibm-mas/gitops-demo/tree/002](https://github.com/ibm-mas/gitops-demo/tree/002) \ No newline at end of file diff --git a/build/bin/build-docs.sh b/build/bin/build-docs.sh new file mode 100644 index 000000000..bac9e50a8 --- /dev/null +++ b/build/bin/build-docs.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +python -m pip install -q mkdocs mkdocs-redirects +python -m pip install -q mkdocs mkdocs-macros-plugin +python -m pip install -q mkdocs mkdocs-drawio-file +mkdocs build --verbose --clean --strict \ No newline at end of file diff --git a/build/bin/copy-gitops.sh b/build/bin/copy-gitops.sh new file mode 100755 index 000000000..90a036224 --- /dev/null +++ b/build/bin/copy-gitops.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +echo "build/bin/copy-gitops.sh -s -t " +echo "" +echo "Example usage: " +echo " build/bin/copy-gitops.sh -s /Users/whitfiea/Work/Git/ibm-mas/gitops -t /Users/whitfiea/Work/Git/ibm-mas/mas-gitops" +echo "" + +# Process command line arguments +while [[ $# -gt 0 ]] +do + key="$1" + shift + case $key in + -s|--source) + SOURCE=$1 + shift + ;; + + -t|--target) + TARGET=$1 + shift + ;; + + *) + # unknown option + echo -e "\nUsage Error: Unsupported flag \"${key}\"\n\n" + exit 1 + ;; + esac +done + +: ${SOURCE?"Need to set -s|--source argument for source directory"} +: ${TARGET?"Need to set -t|--target argument for target directory"} + +echo "Deleting files in target" +rm -rvf ${TARGET}/instance-applications/* +rm -rvf ${TARGET}/cluster-applications/* +rm -rvf ${TARGET}/docs/* +rm -rvf ${TARGET}/root-applications/* +rm -v ${TARGET}/README.md +rm -v ${TARGET}/LICENSE + +echo "Copying gitops" +cp -vr ${SOURCE}/* ${TARGET} + diff --git a/cluster-applications/000-ibm-operator-catalog/Chart.yaml b/cluster-applications/000-ibm-operator-catalog/Chart.yaml new file mode 100644 index 000000000..0c7003475 --- 
/dev/null +++ b/cluster-applications/000-ibm-operator-catalog/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-operator-catalog +description: IBM Maximo Operator Catalog +type: application +version: 1.0.0 diff --git a/cluster-applications/000-ibm-operator-catalog/README.md b/cluster-applications/000-ibm-operator-catalog/README.md new file mode 100644 index 000000000..a2b5e634a --- /dev/null +++ b/cluster-applications/000-ibm-operator-catalog/README.md @@ -0,0 +1,3 @@ +IBM Maximo Operator Catalog +=============================================================================== +Installs the `ibm-operator-catalog` `CatalogSource` into the `openshift-marketplace` namespace diff --git a/cluster-applications/000-ibm-operator-catalog/templates/01-default_ServiceAccount.yaml b/cluster-applications/000-ibm-operator-catalog/templates/01-default_ServiceAccount.yaml new file mode 100644 index 000000000..4501bed02 --- /dev/null +++ b/cluster-applications/000-ibm-operator-catalog/templates/01-default_ServiceAccount.yaml @@ -0,0 +1,14 @@ +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: default + namespace: openshift-marketplace + annotations: + argocd.argoproj.io/sync-wave: "000" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +imagePullSecrets: + - name: ibm-entitlement diff --git a/cluster-applications/000-ibm-operator-catalog/templates/02-ibm-entitlement_Secret.yaml b/cluster-applications/000-ibm-operator-catalog/templates/02-ibm-entitlement_Secret.yaml new file mode 100644 index 000000000..97e0c169e --- /dev/null +++ b/cluster-applications/000-ibm-operator-catalog/templates/02-ibm-entitlement_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-entitlement + namespace: openshift-marketplace + annotations: + argocd.argoproj.io/sync-wave: "000" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: 
kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: >- + {{ .Values.ibm_entitlement_key }} diff --git a/cluster-applications/000-ibm-operator-catalog/templates/03-ibm-operator-catalog_CatalogSource.yaml b/cluster-applications/000-ibm-operator-catalog/templates/03-ibm-operator-catalog_CatalogSource.yaml new file mode 100644 index 000000000..f777eb153 --- /dev/null +++ b/cluster-applications/000-ibm-operator-catalog/templates/03-ibm-operator-catalog_CatalogSource.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: CatalogSource +metadata: + name: ibm-operator-catalog + namespace: openshift-marketplace + annotations: + argocd.argoproj.io/sync-wave: "001" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + displayName: IBM Maximo Operators ({{ .Values.mas_catalog_version }}) + publisher: IBM + description: Online Catalog Source for IBM Maximo Application Suite + sourceType: grpc + image: "{{ .Values.mas_catalog_image }}:{{ .Values.mas_catalog_version }}" + secrets: + - ibm-entitlement + updateStrategy: + registryPoll: + interval: 45m + priority: 90 diff --git a/cluster-applications/000-ibm-operator-catalog/values.yaml b/cluster-applications/000-ibm-operator-catalog/values.yaml new file mode 100644 index 000000000..1017f0207 --- /dev/null +++ b/cluster-applications/000-ibm-operator-catalog/values.yaml @@ -0,0 +1,4 @@ +--- +mas_catalog_version: v8-230414-amd64 +mas_catalog_image: icr.io/cpopen/ibm-maximo-operator-catalog +ibm_entitlement_key: "" diff --git a/cluster-applications/010-redhat-cert-manager/Chart.yaml b/cluster-applications/010-redhat-cert-manager/Chart.yaml new file mode 100644 index 000000000..edf97a65f --- /dev/null +++ b/cluster-applications/010-redhat-cert-manager/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: redhat-cert-manager +description: Redhat OpenShift cert-manager Operator +type: application +version: 1.0.0 diff --git 
a/cluster-applications/010-redhat-cert-manager/README.md b/cluster-applications/010-redhat-cert-manager/README.md new file mode 100644 index 000000000..9a5714663 --- /dev/null +++ b/cluster-applications/010-redhat-cert-manager/README.md @@ -0,0 +1,3 @@ +Redhat OpenShift cert-manager Operator +=============================================================================== +Installs Redhat OpenShift cert-manager Operator in cert-manager-operator namespace diff --git a/cluster-applications/010-redhat-cert-manager/templates/00-cert-manager_Namespace.yaml b/cluster-applications/010-redhat-cert-manager/templates/00-cert-manager_Namespace.yaml new file mode 100644 index 000000000..2a67c1a3c --- /dev/null +++ b/cluster-applications/010-redhat-cert-manager/templates/00-cert-manager_Namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager-operator + annotations: + argocd.argoproj.io/sync-wave: "010" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/cluster-applications/010-redhat-cert-manager/templates/01-cert-manager_OperatorGroup.yaml b/cluster-applications/010-redhat-cert-manager/templates/01-cert-manager_OperatorGroup.yaml new file mode 100644 index 000000000..e421378eb --- /dev/null +++ b/cluster-applications/010-redhat-cert-manager/templates/01-cert-manager_OperatorGroup.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: operators.coreos.com/v1alpha2 +kind: OperatorGroup +metadata: + name: operatorgroup + namespace: cert-manager-operator + annotations: + argocd.argoproj.io/sync-wave: "011" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - cert-manager-operator diff --git 
a/cluster-applications/010-redhat-cert-manager/templates/02-cert-manager_Subscription.yaml b/cluster-applications/010-redhat-cert-manager/templates/02-cert-manager_Subscription.yaml new file mode 100644 index 000000000..15a22bb8e --- /dev/null +++ b/cluster-applications/010-redhat-cert-manager/templates/02-cert-manager_Subscription.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: openshift-cert-manager-operator + namespace: cert-manager-operator + labels: + operators.coreos.com/openshift-cert-manager-operator.cert-manager-operator: '' +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "012" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + channel: {{ .Values.channel }} + installPlanApproval: Automatic + name: openshift-cert-manager-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + + diff --git a/cluster-applications/010-redhat-cert-manager/templates/04-postsync-update-sm_Job.yaml b/cluster-applications/010-redhat-cert-manager/templates/04-postsync-update-sm_Job.yaml new file mode 100644 index 000000000..d2cdf7ed8 --- /dev/null +++ b/cluster-applications/010-redhat-cert-manager/templates/04-postsync-update-sm_Job.yaml @@ -0,0 +1,242 @@ + + +{{- /* +TODO: + The secrets being created in AWS SM by this Job are nothing to do with cert-manager + There *has* to be a better way of getting these bits of info into the ibm-db2u and ibm-db2u-database charts +*/}} + +{{- if .Values.run_sync_hooks }} + +{{ $ns := "cert-manager-operator"}} +{{ $aws_secret := "aws"}} +{{ $role_name := "postsync-rhcm-update-sm-r" }} +{{ $sa_name := "postsync-rhcm-update-sm-sa" }} +{{ $rb_name := "postsync-rhcm-update-sm-rb" }} + + +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ $aws_secret }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "013" +{{- if 
.Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + aws_access_key_id: {{ .Values.sm_aws_access_key_id | b64enc }} + aws_secret_access_key: {{ .Values.sm_aws_secret_access_key | b64enc }} +type: Opaque + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "013" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + annotations: + argocd.argoproj.io/sync-wave: "013" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + apiGroups: + - packages.operators.coreos.com + resources: + - packagemanifests + - verbs: + - get + apiGroups: + - config.openshift.io + resources: + - ingresses + + + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + annotations: + argocd.argoproj.io/sync-wave: "014" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $role_name }} + +--- +apiVersion: batch/v1 +kind: Job +metadata: + # Generate the job name by suffixing with a hash of all chart values + # This is to ensure that ArgoCD will delete and recreate the job if (and only if) anything changes + # Any change to cluster config will trigger a rerun of the job. + # The job is idempotent and quick so no real harm in running it when we don't actually need to. 
+ name: "postsync-rhcm-update-sm-job-{{ .Values | toYaml | adler32sum }}" + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "015" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: ACCOUNT_ID + value: {{ .Values.account_id }} + - name: REGION_ID + value: {{ .Values.region_id }} + - name: CLUSTER_ID + value: {{ .Values.cluster_id }} + # Hard-coded for now: + - name: AVP_TYPE + value: "aws" + volumeMounts: + - name: aws + mountPath: /etc/mas/creds/aws + command: + - /bin/sh + - -c + - | + + + set -e + + # might as well take advantage of gitops_utils for sm_ functions as we're using the cli image + source /mascli/functions/gitops_utils + + function wait_for_cluster_resource { + RES_TYPE="$1" + RES_NAME="$2" + RETRIES=${3:-10} + RETRY_DELAY_SECONDS=${4:-30} + + for (( c=1; c<="${RETRIES}"; c++ )); do + + echo "... attempt ${c} of ${RETRIES}" + + rc=0 + oc get "${RES_TYPE}/${RES_NAME}" || rc=$? + if [[ "$rc" == "0" ]]; then + echo "...... success" + return 0 + fi + + if [[ "${c}" -lt "${RETRIES}" ]]; then + echo "...... failed (rc: ${rc}), retry in ${RETRY_DELAY_SECONDS}s" + sleep $RETRY_DELAY_SECONDS + fi + done + + echo "...... 
failed, no attempts remain" + return 1 + } + + + echo "" + echo "================================================================================" + echo "Waiting for PackageManifest db2u-operator to be present before continuing (timeout 300s)" + echo "================================================================================" + wait_for_cluster_resource "PackageManifest" "db2u-operator" + + echo "" + echo "================================================================================" + echo "Waiting for ingress.config.openshift.io cluster to be present before continuing (timeout 300s)" + echo "================================================================================" + wait_for_cluster_resource "ingress.config.openshift.io" "cluster" + + + # NOTE: cannot just render AWS secrets into here, as it will be exposed in the ArgoCD UI + # Instead, we pass them into a secret (ArgoCD knows to hide any data fields in k8s secrets), + # mount the secret on the jobs filesystem, and read them in here + SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/aws/aws_access_key_id) + SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/aws/aws_secret_access_key) + + echo "Fetching defaultChannel from db2u-operator PackageManifest" + export DB2_DEFAULT_CHANNEL=$(oc get PackageManifest db2u-operator -o=jsonpath="{.status.defaultChannel}") + if [[ -z "${DB2_DEFAULT_CHANNEL}" ]]; then + echo "Failed to fetch defaultChannel" + exit 1 + fi + + echo "Fetching domain from ingress.config.openshift.io cluster" + export CLUSTER_DOMAIN=$(oc get ingress.config.openshift.io cluster -o=jsonpath='{.spec.domain}') + if [[ -z "${CLUSTER_DOMAIN}" ]]; then + echo "Failed to fetch domain" + exit 1 + fi + + export SM_AWS_REGION=${REGION_ID} + sm_login + + + # Used by the Subscription resource in the ibm-db2u chart (https://github.com/ibm-mas/gitops/blob/5fdeaacb9180756d52da3708f68cfcc1949c4c98/applications/060-ibm-db2u/templates/03-db2_Subscription.yaml#L10) + 
SECRET_NAME_DB2_DEFAULT_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/db2_default_channel + sm_update_secret $SECRET_NAME_DB2_DEFAULT_CHANNEL "{\"db2_default_channel\": \"$DB2_DEFAULT_CHANNEL\" }" + + # Used by resources in the ibm-db2u-database chart (https://github.com/ibm-mas/gitops/tree/5fdeaacb9180756d52da3708f68cfcc1949c4c98/applications/120-ibm-db2u-database) + SECRET_NAME_CLUSTER_DOMAIN=${ACCOUNT_ID}/${CLUSTER_ID}/cluster_domain + sm_update_secret $SECRET_NAME_CLUSTER_DOMAIN "{\"cluster_domain\": \"$CLUSTER_DOMAIN\" }" + + + + + restartPolicy: Never + + # TODO: is this the correct SA to use here? + # No, probably want to add a more restricted SA that can just do things that these post-sync jobs need to do + serviceAccountName: {{ $sa_name }} + volumes: + - name: aws + secret: + secretName: {{ $aws_secret }} + defaultMode: 420 + optional: false + backoffLimit: 4 +{{- end }} diff --git a/cluster-applications/010-redhat-cert-manager/values.yaml b/cluster-applications/010-redhat-cert-manager/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/cluster-applications/010-redhat-cert-manager/values.yaml @@ -0,0 +1 @@ +--- diff --git a/cluster-applications/020-ibm-dro/Chart.yaml b/cluster-applications/020-ibm-dro/Chart.yaml new file mode 100644 index 000000000..68a930637 --- /dev/null +++ b/cluster-applications/020-ibm-dro/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-dro +description: IBM DRO +type: application +version: 1.0.0 diff --git a/cluster-applications/020-ibm-dro/README.md b/cluster-applications/020-ibm-dro/README.md new file mode 100644 index 000000000..3466d955a --- /dev/null +++ b/cluster-applications/020-ibm-dro/README.md @@ -0,0 +1,5 @@ +IBM DRO +=============================================================================== +Deploy and configure dro (Data Reporter Operator). + +The dro_cmm_setup being set to true is used to configure connectivity to CMM which is an internal IBM tool, and is not required outside of IBM. 
\ No newline at end of file diff --git a/cluster-applications/020-ibm-dro/templates/00-dro_Namespace.yaml b/cluster-applications/020-ibm-dro/templates/00-dro_Namespace.yaml new file mode 100644 index 000000000..da75a47da --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/00-dro_Namespace.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + # This means argocd will delete this namespace if/when the corresponding application is deleted. + # This could be bad, e.g. + # what if the user specifies a "shared" namespace as dro_namespace? (in fact I think the default "redhat-marketplace" we have for this is one of these) + # Not sure what the correct answer is here; we can't rely on specification of the namespace in argocd appset.destination.namespace + # (since that is now shared by lots of different apps) + name: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "020" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/cluster-applications/020-ibm-dro/templates/01-dro_OperatorGroup.yaml b/cluster-applications/020-ibm-dro/templates/01-dro_OperatorGroup.yaml new file mode 100644 index 000000000..a2ee7d2ff --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/01-dro_OperatorGroup.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: ibm-mas-operator-group + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "021" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - "{{ .Values.dro_namespace }}" diff --git a/cluster-applications/020-ibm-dro/templates/02-dro-pull_Secret.yaml b/cluster-applications/020-ibm-dro/templates/02-dro-pull_Secret.yaml new file mode 100644 index 000000000..7224a0b75 --- /dev/null +++ 
b/cluster-applications/020-ibm-dro/templates/02-dro-pull_Secret.yaml @@ -0,0 +1,15 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: redhat-marketplace-pull-secret + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "021" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +data: + PULL_SECRET: {{ .Values.ibm_entitlement_key }} diff --git a/cluster-applications/020-ibm-dro/templates/03-imo_Subscription.yaml b/cluster-applications/020-ibm-dro/templates/03-imo_Subscription.yaml new file mode 100644 index 000000000..66b3536b7 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/03-imo_Subscription.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-metrics-operator + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "022" + labels: + app.kubernetes.io/name: imo +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: stable + installPlanApproval: Automatic + name: ibm-metrics-operator + source: certified-operators + sourceNamespace: openshift-marketplace diff --git a/cluster-applications/020-ibm-dro/templates/04-dro_Subscription.yaml b/cluster-applications/020-ibm-dro/templates/04-dro_Subscription.yaml new file mode 100644 index 000000000..d084adfa5 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/04-dro_Subscription.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-data-reporter-operator + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "022" + labels: + app.kubernetes.io/name: dro +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: stable + installPlanApproval: Automatic + name: ibm-data-reporter-operator + source: 
certified-operators + sourceNamespace: openshift-marketplace diff --git a/cluster-applications/020-ibm-dro/templates/05-rmo_Subscription.yaml b/cluster-applications/020-ibm-dro/templates/05-rmo_Subscription.yaml new file mode 100644 index 000000000..f044359a4 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/05-rmo_Subscription.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: redhat-marketplace-operator + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "022" + labels: + app.kubernetes.io/name: rmo +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: stable + installPlanApproval: Automatic + name: redhat-marketplace-operator + source: certified-operators + sourceNamespace: openshift-marketplace diff --git a/cluster-applications/020-ibm-dro/templates/06-marketplaceconfig_Marketplaceconfig.yaml b/cluster-applications/020-ibm-dro/templates/06-marketplaceconfig_Marketplaceconfig.yaml new file mode 100644 index 000000000..1949ffac1 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/06-marketplaceconfig_Marketplaceconfig.yaml @@ -0,0 +1,15 @@ +apiVersion: marketplace.redhat.com/v1alpha1 +kind: MarketplaceConfig +metadata: + name: marketplaceconfig + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "024" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + license: + accept: true \ No newline at end of file diff --git a/cluster-applications/020-ibm-dro/templates/07-dro-api-token_Secret.yaml b/cluster-applications/020-ibm-dro/templates/07-dro-api-token_Secret.yaml new file mode 100644 index 000000000..02eb43449 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/07-dro-api-token_Secret.yaml @@ -0,0 +1,15 @@ +--- +kind: 
Secret +apiVersion: v1 +metadata: + name: ibm-data-reporter-operator-api-token + namespace: "{{ .Values.dro_namespace }}" + labels: + secret-owner: ibm-data-reporter-operator-api +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "025" + kubernetes.io/service-account.name: ibm-data-reporter-operator-api +type: kubernetes.io/service-account-token diff --git a/cluster-applications/020-ibm-dro/templates/07-dro_rbac.yaml b/cluster-applications/020-ibm-dro/templates/07-dro_rbac.yaml new file mode 100644 index 000000000..d1f1352fb --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/07-dro_rbac.yaml @@ -0,0 +1,86 @@ +# Source: redhat-marketplace-operator-template-chart/templates/role_binding.yaml +# https://github.com/redhat-marketplace/redhat-marketplace-operator/blob/develop/v2/config/rbac_classic/role_binding.yaml +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: metric-state-view-binding + annotations: + argocd.argoproj.io/sync-wave: "025" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: +- kind: ServiceAccount + name: ibm-metrics-operator-metric-state + namespace: openshift-redhat-marketplace +- kind: ServiceAccount + name: ibm-metrics-operator-metric-state + namespace: "{{ .Values.dro_namespace }}" +- kind: ServiceAccount + name: ibm-metrics-operator-metric-state + namespace: ibm-common-services +- kind: ServiceAccount + name: ibm-metrics-operator-metric-state + namespace: ibm-software-central +roleRef: + kind: ClusterRole + name: view + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: reporter-cluster-monitoring-binding + annotations: + argocd.argoproj.io/sync-wave: "025" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} 
+subjects: +- kind: ServiceAccount + name: ibm-metrics-operator-reporter + namespace: openshift-redhat-marketplace +- kind: ServiceAccount + name: ibm-metrics-operator-reporter + namespace: "{{ .Values.dro_namespace }}" +- kind: ServiceAccount + name: ibm-metrics-operator-reporter + namespace: ibm-common-services +- kind: ServiceAccount + name: ibm-metrics-operator-reporter + namespace: ibm-software-central +roleRef: + kind: ClusterRole + name: cluster-monitoring-view + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: manager-cluster-monitoring-binding + annotations: + argocd.argoproj.io/sync-wave: "025" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: +- kind: ServiceAccount + name: ibm-metrics-operator-controller-manager + namespace: openshift-redhat-marketplace +- kind: ServiceAccount + name: ibm-metrics-operator-controller-manager + namespace: "{{ .Values.dro_namespace }}" +- kind: ServiceAccount + name: ibm-metrics-operator-controller-manager + namespace: ibm-common-services +- kind: ServiceAccount + name: ibm-metrics-operator-controller-manager + namespace: ibm-software-central +roleRef: + kind: ClusterRole + name: cluster-monitoring-view + apiGroup: rbac.authorization.k8s.io diff --git a/cluster-applications/020-ibm-dro/templates/08-postsync-update-sm_Job.yaml b/cluster-applications/020-ibm-dro/templates/08-postsync-update-sm_Job.yaml new file mode 100644 index 000000000..2e53a569c --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/08-postsync-update-sm_Job.yaml @@ -0,0 +1,234 @@ +{{- if .Values.run_sync_hooks }} + +{{ $ns := .Values.dro_namespace}} +{{ $aws_secret := "aws"}} +{{ $role_name := "postsync-ibm-dro-update-sm-r" }} +{{ $sa_name := "postsync-ibm-dro-update-sm-sa" }} +{{ $rb_name := "postsync-ibm-dro-update-sm-rb" }} + + +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ $aws_secret }} + 
namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "026" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + aws_access_key_id: {{ .Values.sm_aws_access_key_id | b64enc }} + aws_secret_access_key: {{ .Values.sm_aws_secret_access_key | b64enc }} +type: Opaque + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "026" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "026" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + apiGroups: + - route.openshift.io + resources: + - routes + + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "027" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $role_name }} + +--- +apiVersion: batch/v1 +kind: Job +metadata: + # Generate the job name by suffixing with a hash of all chart values + # This is to ensure that ArgoCD will delete and recreate the job if (and only if) anything changes + # Any change to cluster config will trigger a rerun of the job. + # We can refine this in future to only take into account a subset of instance config (perhaps just values under ibm_dro?). + # But the job is idempotent and quick so no real harm in running it when we don't actually need to. 
+ name: "postsync-ibm-dro-update-sm-job-{{ .Values | toYaml | adler32sum }}" + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "028" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: ACCOUNT_ID + value: {{ .Values.account_id }} + - name: REGION_ID + value: {{ .Values.region_id }} + - name: CLUSTER_ID + value: {{ .Values.cluster_id }} + - name: DRO_NAMESPACE + value: {{ .Values.dro_namespace }} + # Hard-coded for now: + - name: AVP_TYPE + value: "aws" + volumeMounts: + - name: aws + mountPath: /etc/mas/creds/aws + - name: ibm-data-reporter-operator-api-token + mountPath: /etc/mas/creds/ibm-data-reporter-operator-api-token + command: + - /bin/sh + - -c + - | + + set -e + + # might as well take advantage of gitops_utils for sm_ functions as we're using the cli image + source /mascli/functions/gitops_utils + + function wait_for_resource { + RES_TYPE="$1" + RES_NAME="$2" + RES_NS="$3" + RETRIES=${4:-10} + RETRY_DELAY_SECONDS=${5:-30} + + for (( c=1; c<="${RETRIES}"; c++ )); do + + echo "... attempt ${c} of ${RETRIES}" + + rc=0 + oc get "${RES_TYPE}/${RES_NAME}" -n "${RES_NAMESPACE}" || rc=$? + if [[ "$rc" == "0" ]]; then + echo "...... success" + return 0 + fi + + if [[ "${c}" -lt "${RETRIES}" ]]; then + echo "...... failed (rc: ${rc}), retry in ${RETRY_DELAY_SECONDS}s" + sleep $RETRY_DELAY_SECONDS + fi + done + + echo "...... 
failed, no attempts remain" + return 1 + } + + + echo "" + echo "================================================================================" + echo "Waiting for route ibm-data-reporter to be present before continuing (timeout 300s)" + echo "================================================================================" + wait_for_resource "route" "ibm-data-reporter" "${DRO_NAMESPACE}" + + + # NOTE: cannot just render AWS secrets into here, as it will be exposed in the ArgoCD UI + # Instead, we pass them into a secret (ArgoCD knows to hide any data fields in k8s secrets), + # mount the secret on the jobs filesystem, and read them in here + SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/aws/aws_access_key_id) + SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/aws/aws_secret_access_key) + + export DRO_HOST="$(oc get route ibm-data-reporter -n ${DRO_NAMESPACE} -ojsonpath='{.spec.host}')" + if [[ -z "${DRO_HOST}" ]]; then + echo "Failed to fetch dro host from route" + exit 1 + fi + export DRO_URL="https://${DRO_HOST}" + + echo "Fetching token from ibm-data-reporter-operator-api-token Secret in ${DRO_NAMESPACE}" + export DRO_API_TOKEN=$(cat /etc/mas/creds/ibm-data-reporter-operator-api-token/token) + if [[ -z "${DRO_API_TOKEN}" ]]; then + echo "Failed to fetch token" + exit 1 + fi + + + # aws configure set aws_access_key_id $SM_AWS_ACCESS_KEY_ID + # aws configure set aws_secret_access_key $SM_AWS_SECRET_ACCESS_KEY + # aws configure set default.region $REGION_ID + # aws configure list + export SM_AWS_REGION=${REGION_ID} + sm_login + + # aws secretsmanager create-secret --name ${SECRET_NAME} --secret-string "${SECRET_VALUE}" + SECRET_NAME_DRO=${ACCOUNT_ID}/${CLUSTER_ID}/dro + sm_update_secret $SECRET_NAME_DRO "{\"dro_api_token\": \"$DRO_API_TOKEN\", \"dro_url\": \"$DRO_URL\" }" + + + restartPolicy: Never + + # TODO: is this the correct SA to use here? 
+ # No, probably want to add a more restricted SA that can just do things that these post-sync jobs need to do + serviceAccountName: {{ $sa_name }} + volumes: + - name: aws + secret: + secretName: {{ $aws_secret }} + defaultMode: 420 + optional: false + - name: ibm-data-reporter-operator-api-token + secret: + secretName: ibm-data-reporter-operator-api-token + defaultMode: 420 + optional: false + backoffLimit: 4 +{{- end }} + + + diff --git a/cluster-applications/020-ibm-dro/templates/09-dro-cmm_Secret.yaml b/cluster-applications/020-ibm-dro/templates/09-dro-cmm_Secret.yaml new file mode 100644 index 000000000..e11b9cf54 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/09-dro-cmm_Secret.yaml @@ -0,0 +1,39 @@ +{{- if .Values.dro_cmm_setup }} + +--- +apiVersion: v1 +kind: Secret +metadata: + name: dest-header-map-secret + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "029" +type: Opaque +stringData: + accept: application/json +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-header-map-secret + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "029" +type: Opaque +stringData: + accept: application/json + Content-Type: application/json +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-body-data-secret + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "029" +type: Opaque +stringData: + bodydata: | + {"apikey": "{{ .Values.dro_cmm.auth_apikey }}"} + +{{- end }} \ No newline at end of file diff --git a/cluster-applications/020-ibm-dro/templates/10-dro-cmm_ConfigMap.yaml b/cluster-applications/020-ibm-dro/templates/10-dro-cmm_ConfigMap.yaml new file mode 100644 index 000000000..e64ce8a9c --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/10-dro-cmm_ConfigMap.yaml @@ -0,0 +1,42 @@ +{{- if .Values.dro_cmm_setup }} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kazaam-configmap + namespace: "{{ 
.Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "029" +data: + kazaam.json: | + [ + { + "operation": "timestamp", + "spec": { + "timestamp": { + "inputFormat": "2006-01-02T15:04:05.999999+00:00", + "outputFormat": "$unixmilli" + } + } + }, + { + "operation": "shift", + "spec": { + "instances[0].instanceId": "properties.source", + "instances[0].startTime": "timestamp", + "instances[0].endTime": "timestamp", + "instances[0].metricUsage[0].metricId": "properties.unit", + "instances[0].metricUsage[0].quantity": "properties.quantity" + } + }, + { + "operation": "default", + "spec": { + "meteringModel": "point-in-time", + "meteringPlan": "contract" + } + } + ] + +{{- end }} \ No newline at end of file diff --git a/cluster-applications/020-ibm-dro/templates/11-dro-cmm_DataReporterConfig.yaml b/cluster-applications/020-ibm-dro/templates/11-dro-cmm_DataReporterConfig.yaml new file mode 100644 index 000000000..3f78c4082 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/11-dro-cmm_DataReporterConfig.yaml @@ -0,0 +1,54 @@ +{{- if .Values.dro_cmm_setup }} + +--- +apiVersion: marketplace.redhat.com/v1alpha1 +kind: DataReporterConfig +metadata: + name: datareporterconfig + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "030" +spec: + confirmDelivery: false + dataFilters: + - altDestinations: + - authorization: + authDestHeader: Authorization + authDestHeaderPrefix: 'Bearer ' + bodyData: + secretKeyRef: + name: auth-body-data-secret + key: bodydata + header: + secret: + name: auth-header-map-secret + tokenExpr: $.token + url: "{{ .Values.dro_cmm.auth_url }}" + header: + secret: + name: dest-header-map-secret + transformer: + configMapKeyRef: + key: kazaam.json + name: kazaam-configmap + type: kazaam + url: "{{ .Values.dro_cmm.cmm_url }}" + urlSuffixExpr: $.properties.productId + manifestType: dataReporter + selector: + matchExpressions: + - $[?($.event == "Account Contractual Usage")] + - 
$.properties.productId + - $[?($.properties.source != null)] + - $[?($.properties.unit == "AppPoints")] + - $[?($.properties.quantity >= 0)] + - $[?($.timestamp != null)] + matchUsers: + - system:serviceaccount:{{ .Values.dro_namespace }}:ibm-data-reporter-operator-api + transformer: + configMapKeyRef: + key: kazaam.json + name: kazaam-configmap + type: kazaam + +{{- end }} \ No newline at end of file diff --git a/cluster-applications/020-ibm-dro/templates/postdelete-MarketplaceConfigs-resources.yaml b/cluster-applications/020-ibm-dro/templates/postdelete-MarketplaceConfigs-resources.yaml new file mode 100644 index 000000000..3ed803055 --- /dev/null +++ b/cluster-applications/020-ibm-dro/templates/postdelete-MarketplaceConfigs-resources.yaml @@ -0,0 +1,69 @@ + +# postdelete-MarketplaceConfigs-resources template in the ibm-dro-cleanup chart depends on these resources +# if you make any updates here, ensure that template is updated to align +{{ $role_name := "postdelete-delete-marketplaceconfigs-r" }} +{{ $sa_name := "postdelete-delete-marketplaceconfigs-sa" }} +{{ $rb_name := "postdelete-delete-marketplaceconfigs-rb" }} +{{ $np_name := "postdelete-delete-marketplaceconfigs-np" }} +{{ $job_label := "postdelete-delete-marketplaceconfigs-job" }} +{{ $ns := .Values.dro_namespace }} + + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "000" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "000" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - watch + - list + - update + - delete + apiGroups: + - marketplace.redhat.com + resources: + - marketplaceconfigs + +--- +kind: 
RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ $rb_name }}
+  namespace: {{ $ns }}
+  annotations:
+    argocd.argoproj.io/sync-wave: "001"
+{{- if .Values.custom_labels }}
+  labels:
+{{ .Values.custom_labels | toYaml | indent 4 }}
+{{- end }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ $sa_name }}
+    namespace: {{ $ns }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ $role_name }}
+
diff --git a/cluster-applications/020-ibm-dro/values.yaml b/cluster-applications/020-ibm-dro/values.yaml
new file mode 100644
index 000000000..a86d235c1
--- /dev/null
+++ b/cluster-applications/020-ibm-dro/values.yaml
@@ -0,0 +1,4 @@
+---
+dro_namespace: "redhat-marketplace"
+ibm_entitlement_key: ""
+dro_cmm_setup: false
\ No newline at end of file
diff --git a/cluster-applications/021-ibm-dro-cleanup/Chart.yaml b/cluster-applications/021-ibm-dro-cleanup/Chart.yaml
new file mode 100644
index 000000000..fbf37cec1
--- /dev/null
+++ b/cluster-applications/021-ibm-dro-cleanup/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: ibm-dro-cleanup
+description: IBM DRO (Cleanup)
+type: application
+version: 1.0.0
diff --git a/cluster-applications/021-ibm-dro-cleanup/README.md b/cluster-applications/021-ibm-dro-cleanup/README.md
new file mode 100644
index 000000000..8da02258e
--- /dev/null
+++ b/cluster-applications/021-ibm-dro-cleanup/README.md
@@ -0,0 +1,6 @@
+IBM DRO Cleanup
+===============================================================================
+Contains a PostDelete hook that issues deletes for MarketplaceConfig CRs to allow ibm-dro application uninstall to proceed.
+This chart must be managed by an Application in a later syncwave than ibm-dro to ensure the PostDelete hook can
+complete before the ibm dro application is removed (otherwise the pods responsible for managing the MarketplaceConfig
+finalizers will be removed before they get a chance to complete).
\ No newline at end of file diff --git a/cluster-applications/021-ibm-dro-cleanup/templates/00-placeholder_ConfigMap.yaml b/cluster-applications/021-ibm-dro-cleanup/templates/00-placeholder_ConfigMap.yaml new file mode 100644 index 000000000..e96763354 --- /dev/null +++ b/cluster-applications/021-ibm-dro-cleanup/templates/00-placeholder_ConfigMap.yaml @@ -0,0 +1,16 @@ +--- +# This to prevent AVP from complaining about there being no manifests +# if none of the other resources in this chart end up being rendered +apiVersion: v1 +kind: ConfigMap +metadata: + name: placeholder + namespace: "{{ .Values.dro_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "00" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + nodata: "" \ No newline at end of file diff --git a/cluster-applications/021-ibm-dro-cleanup/templates/postdelete-MarketplaceConfigs.yaml b/cluster-applications/021-ibm-dro-cleanup/templates/postdelete-MarketplaceConfigs.yaml new file mode 100644 index 000000000..0683dd22f --- /dev/null +++ b/cluster-applications/021-ibm-dro-cleanup/templates/postdelete-MarketplaceConfigs.yaml @@ -0,0 +1,106 @@ + +{{ $job_name := "postdelete-delete-marketplaceconfigs-job" }} + +# NOTE: depends on resources created in ibm-dro chart (postdelete-MarketplaceConfigs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-marketplaceconfigs-r" }} +{{ $sa_name := "postdelete-delete-marketplaceconfigs-sa" }} +{{ $rb_name := "postdelete-delete-marketplaceconfigs-rb" }} +{{ $np_name := "postdelete-delete-marketplaceconfigs-np" }} +{{ $job_label := "postdelete-delete-marketplaceconfigs-job" }} +{{ $ns := .Values.dro_namespace }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if 
.Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? + # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "marketplaceconfig.marketplace.redhat.com/marketplaceconfig" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 + \ No newline at end of file diff --git a/cluster-applications/021-ibm-dro-cleanup/values.yaml b/cluster-applications/021-ibm-dro-cleanup/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/cluster-applications/021-ibm-dro-cleanup/values.yaml @@ -0,0 +1 @@ +--- diff --git a/cluster-applications/040-cis-compliance/Chart.yaml b/cluster-applications/040-cis-compliance/Chart.yaml new file mode 100644 index 000000000..1be84696a --- /dev/null +++ b/cluster-applications/040-cis-compliance/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: cis-compliance +description: IBM CIS Compliance +type: application +version: 1.0.0 diff --git a/cluster-applications/040-cis-compliance/README.md b/cluster-applications/040-cis-compliance/README.md new file mode 100644 index 000000000..f2583b789 --- /dev/null +++ b/cluster-applications/040-cis-compliance/README.md @@ -0,0 +1,3 @@ +IBM CIS Compliance +=============================================================================== +Installs IBM Compliance Operator into the `openshift-compliance` namespace and add disable rules in tailoredprofile for limitation on ROSA \ No newline at end of file diff --git a/cluster-applications/040-cis-compliance/templates/01-cis-compliance_Namespace.yaml b/cluster-applications/040-cis-compliance/templates/01-cis-compliance_Namespace.yaml new file mode 100644 index 000000000..9a1dfdcc4 --- /dev/null +++ b/cluster-applications/040-cis-compliance/templates/01-cis-compliance_Namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-compliance + annotations: + argocd.argoproj.io/sync-wave: "040" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ 
.Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/cluster-applications/040-cis-compliance/templates/02-cis-compliance_OperatorGroup.yaml b/cluster-applications/040-cis-compliance/templates/02-cis-compliance_OperatorGroup.yaml new file mode 100644 index 000000000..2cc3d86dd --- /dev/null +++ b/cluster-applications/040-cis-compliance/templates/02-cis-compliance_OperatorGroup.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: compliance-operator + namespace: openshift-compliance + annotations: + argocd.argoproj.io/sync-wave: "041" + argocd.argoproj.io/sync-options: "SkipDryRunOnMissingResource=true,Validate=false" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - openshift-compliance diff --git a/cluster-applications/040-cis-compliance/templates/03-cis-compliance_Subscription.yaml b/cluster-applications/040-cis-compliance/templates/03-cis-compliance_Subscription.yaml new file mode 100644 index 000000000..0a3bb37ec --- /dev/null +++ b/cluster-applications/040-cis-compliance/templates/03-cis-compliance_Subscription.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: compliance-operator-sub + namespace: openshift-compliance + annotations: + argocd.argoproj.io/sync-wave: "042" + argocd.argoproj.io/sync-options: "SkipDryRunOnMissingResource=true,Validate=false" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: stable + name: compliance-operator + installPlanApproval: Automatic + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + nodeSelector: + node-role.kubernetes.io/worker: "" + env: + - name: PLATFORM + value: "HyperShift" diff --git a/cluster-applications/040-cis-compliance/templates/04-cis-compliance_scansettingbinding.yml 
b/cluster-applications/040-cis-compliance/templates/04-cis-compliance_scansettingbinding.yml new file mode 100644 index 000000000..d8476f016 --- /dev/null +++ b/cluster-applications/040-cis-compliance/templates/04-cis-compliance_scansettingbinding.yml @@ -0,0 +1,24 @@ +--- +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: mas-cis-compliance + namespace: openshift-compliance + annotations: + argocd.argoproj.io/sync-wave: "043" + argocd.argoproj.io/sync-options: "SkipDryRunOnMissingResource=true,Validate=false" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +profiles: + - apiGroup: compliance.openshift.io/v1alpha1 + kind: TailoredProfile + name: mas-ocp4-cis-node-rosa-tailoredprofile + - apiGroup: compliance.openshift.io/v1alpha1 + kind: TailoredProfile + name: mas-ocp4-cis-rosa-tailoredprofile +settingsRef: + apiGroup: compliance.openshift.io/v1alpha1 + kind: ScanSetting + name: default diff --git a/cluster-applications/040-cis-compliance/templates/05-cis-compliance_ocp4-cis-node-tailoredprofile.yml b/cluster-applications/040-cis-compliance/templates/05-cis-compliance_ocp4-cis-node-tailoredprofile.yml new file mode 100644 index 000000000..be1e0dc00 --- /dev/null +++ b/cluster-applications/040-cis-compliance/templates/05-cis-compliance_ocp4-cis-node-tailoredprofile.yml @@ -0,0 +1,44 @@ +--- +apiVersion: compliance.openshift.io/v1alpha1 +kind: TailoredProfile +metadata: + name: mas-ocp4-cis-node-rosa-tailoredprofile + namespace: openshift-compliance + annotations: + argocd.argoproj.io/sync-wave: "043" + argocd.argoproj.io/sync-options: "SkipDryRunOnMissingResource=true,Validate=false" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + description: OCP4 CIS Node TailoredProfile for ROSA + disableRules: + - name: ocp4-kubelet-eviction-thresholds-set-hard-imagefs-available + rationale: Cannot modify Kubelet config 
in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-hard-imagefs-inodesfree + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-hard-memory-available + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-hard-nodefs-available + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-hard-nodefs-inodesfree + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-soft-imagefs-available + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-soft-imagefs-inodesfree + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-soft-memory-available + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-soft-nodefs-available + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-eviction-thresholds-set-soft-nodefs-inodesfree + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-configure-tls-cipher-suites + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-enable-iptables-util-chains + rationale: Cannot modify Kubelet config in ROSA + - name: ocp4-kubelet-configure-event-creation + rationale: Cannot modify Kubelet config in ROSA + extends: ocp4-cis-node + title: OCP4 CIS Node TailoredProfile diff --git a/cluster-applications/040-cis-compliance/templates/06-cis-compliance_ocp4-cis-tailoredprofile.yml b/cluster-applications/040-cis-compliance/templates/06-cis-compliance_ocp4-cis-tailoredprofile.yml new file mode 100644 index 000000000..2d3445b2f --- /dev/null +++ b/cluster-applications/040-cis-compliance/templates/06-cis-compliance_ocp4-cis-tailoredprofile.yml @@ -0,0 +1,20 @@ +--- +apiVersion: compliance.openshift.io/v1alpha1 +kind: TailoredProfile +metadata: + name: mas-ocp4-cis-rosa-tailoredprofile + namespace: 
openshift-compliance + annotations: + argocd.argoproj.io/sync-wave: "043" + argocd.argoproj.io/sync-options: "SkipDryRunOnMissingResource=true,Validate=false" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + description: CIS TailoredProfile for ROSA + disableRules: + - name: ocp4-kubeadmin-removed + rationale: At the moment we still want cluster-admin user as a fall back plan if rbac fails + extends: ocp4-cis + title: CIS TailoredProfile diff --git a/cluster-applications/040-cis-compliance/templates/postdelete-ProfileBundles-resources.yaml b/cluster-applications/040-cis-compliance/templates/postdelete-ProfileBundles-resources.yaml new file mode 100644 index 000000000..1d71f4aee --- /dev/null +++ b/cluster-applications/040-cis-compliance/templates/postdelete-ProfileBundles-resources.yaml @@ -0,0 +1,69 @@ + +# postdelete-ProfileBundles-resources template in the cis-compliance-cleanup chart depends on these resources +# if you make any updates here, ensure that template is updated to align +{{ $role_name := "postdelete-delete-profilebundles-r" }} +{{ $sa_name := "postdelete-delete-profilebundles-sa" }} +{{ $rb_name := "postdelete-delete-profilebundles-rb" }} +{{ $np_name := "postdelete-delete-profilebundles-np" }} +{{ $job_label := "postdelete-delete-profilebundles-job" }} +{{ $ns := "openshift-compliance" }} + + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "000" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "000" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - watch + - list + - update + - delete + 
apiGroups: + - compliance.openshift.io + resources: + - profilebundles + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "001" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $role_name }} + diff --git a/cluster-applications/040-cis-compliance/values.yaml b/cluster-applications/040-cis-compliance/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/cluster-applications/040-cis-compliance/values.yaml @@ -0,0 +1 @@ +--- diff --git a/cluster-applications/041-cis-compliance-cleanup/Chart.yaml b/cluster-applications/041-cis-compliance-cleanup/Chart.yaml new file mode 100644 index 000000000..2ee6f5278 --- /dev/null +++ b/cluster-applications/041-cis-compliance-cleanup/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: cis-compliance-cleanup +description: IBM CIS Compliance (Cleanup) +type: application +version: 1.0.0 diff --git a/cluster-applications/041-cis-compliance-cleanup/README.md b/cluster-applications/041-cis-compliance-cleanup/README.md new file mode 100644 index 000000000..0724d8241 --- /dev/null +++ b/cluster-applications/041-cis-compliance-cleanup/README.md @@ -0,0 +1,6 @@ +IBM CIS Compliance Cleanup +=============================================================================== +Contains a PostDelete hook that issues deletes for ProfileBundle CRs to allow cis-compliance operator uninstall to proceed. +This chart must be managed by an Application in a later syncwave than cis-compliance to ensure the PostDelete hook can +complete before the cis-compliance operator is removed (otherwise the pods responsible for managing the ProfileBundle +finalizers will be removed before they get a chance to complete). 
\ No newline at end of file
diff --git a/cluster-applications/041-cis-compliance-cleanup/templates/00-placeholder_ConfigMap.yaml b/cluster-applications/041-cis-compliance-cleanup/templates/00-placeholder_ConfigMap.yaml
new file mode 100644
index 000000000..0a129719a
--- /dev/null
+++ b/cluster-applications/041-cis-compliance-cleanup/templates/00-placeholder_ConfigMap.yaml
@@ -0,0 +1,16 @@
+---
+# This is to prevent AVP from complaining about there being no manifests
+# if none of the other resources in this chart end up being rendered
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: placeholder
+  namespace: openshift-compliance
+  annotations:
+    argocd.argoproj.io/sync-wave: "00"
+{{- if .Values.custom_labels }}
+  labels:
+{{ .Values.custom_labels | toYaml | indent 4 }}
+{{- end }}
+data:
+  nodata: ""
\ No newline at end of file
diff --git a/cluster-applications/041-cis-compliance-cleanup/templates/postdelete-ProfileBundles.yaml b/cluster-applications/041-cis-compliance-cleanup/templates/postdelete-ProfileBundles.yaml
new file mode 100644
index 000000000..b5c4693f0
--- /dev/null
+++ b/cluster-applications/041-cis-compliance-cleanup/templates/postdelete-ProfileBundles.yaml
@@ -0,0 +1,111 @@
+
+{{ $job_name := "postdelete-delete-profilebundles-job" }}
+
+# NOTE: depends on resources created in the cis-compliance chart (postdelete-ProfileBundles-resources)
+# The values below must align with the values in that file
+{{ $role_name := "postdelete-delete-profilebundles-r" }}
+{{ $sa_name := "postdelete-delete-profilebundles-sa" }}
+{{ $rb_name := "postdelete-delete-profilebundles-rb" }}
+{{ $np_name := "postdelete-delete-profilebundles-np" }}
+{{ $job_label := "postdelete-delete-profilebundles-job" }}
+{{ $ns := "openshift-compliance" }}
+
+
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ $job_name }}
+  namespace: {{ $ns }}
+  annotations:
+    argocd.argoproj.io/hook: PostDelete
+{{- if .Values.custom_labels }}
+  labels:
+{{ .Values.custom_labels | toYaml | 
indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? + # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "profilebundle.compliance.openshift.io/ocp4" "${CR_NAMESPACE}" + delete_oc_resource "profilebundle.compliance.openshift.io/rhcos4" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 + diff --git a/cluster-applications/041-cis-compliance-cleanup/values.yaml b/cluster-applications/041-cis-compliance-cleanup/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/cluster-applications/041-cis-compliance-cleanup/values.yaml @@ -0,0 +1 @@ +--- diff --git a/cluster-applications/050-nvidia-gpu-operator/Chart.yaml b/cluster-applications/050-nvidia-gpu-operator/Chart.yaml new file mode 100644 index 000000000..fd5f2da8e --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: nvidia-gpu-operator +description: Nvidia GPU Operator +type: application +version: 1.0.0 \ No newline at end of file diff --git a/cluster-applications/050-nvidia-gpu-operator/README.md b/cluster-applications/050-nvidia-gpu-operator/README.md new file mode 100644 index 000000000..f698a64f8 --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/README.md @@ -0,0 +1,3 @@ +Nvidia GPU Operator +=============================================================================== +Installs the GPU app dependency, Redhat Node Feature Discovery, and the Nvidia GPU Operator \ No newline at end of file diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/00-nfd_Namespace.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/00-nfd_Namespace.yaml new file mode 100644 index 000000000..6826ac6f6 --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/00-nfd_Namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ .Values.nfd_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "050" + labels: + argocd.argoproj.io/managed-by: {{ 
.Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/01-nfd_OperatorGroup.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/01-nfd_OperatorGroup.yaml new file mode 100644 index 000000000..449b363bb --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/01-nfd_OperatorGroup.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: "{{ .Values.nfd_namespace }}-group" + namespace: "{{ .Values.nfd_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "051" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - "{{ .Values.nfd_namespace }}" diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/02-nfd_Subcription.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/02-nfd_Subcription.yaml new file mode 100644 index 000000000..83d28e14e --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/02-nfd_Subcription.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: nfd-operator + namespace: "{{ .Values.nfd_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "052" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.nfd_channel }}" + installPlanApproval: Automatic + name: nfd + source: redhat-operators + sourceNamespace: openshift-marketplace diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/03-nfd_instance.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/03-nfd_instance.yaml new file mode 100644 index 000000000..ca7d1f402 --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/03-nfd_instance.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: 
nfd.openshift.io/v1 +kind: NodeFeatureDiscovery +metadata: + namespace: "{{ .Values.nfd_namespace }}" + name: nfd-master-worker + annotations: + argocd.argoproj.io/sync-wave: "053" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + instance: "" + topologyUpdater: false + operand: + image: >- + registry.redhat.io/openshift4/ose-node-feature-discovery@sha256:cc09665d75447c53a86a5acb5926f9b9fb59294533e04bfa432001d2b41efebc + imagePullPolicy: Always diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/04-gpu_Namespace.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/04-gpu_Namespace.yaml new file mode 100644 index 000000000..b5fee81e4 --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/04-gpu_Namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ .Values.gpu_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "051" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/05-gpu_OperatorGroup.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/05-gpu_OperatorGroup.yaml new file mode 100644 index 000000000..ea151f263 --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/05-gpu_OperatorGroup.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: "{{ .Values.gpu_namespace }}-group" + namespace: "{{ .Values.gpu_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "052" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - "{{ .Values.gpu_namespace }}" diff --git 
a/cluster-applications/050-nvidia-gpu-operator/templates/06-gpu_Subcription.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/06-gpu_Subcription.yaml new file mode 100644 index 000000000..126df9fc4 --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/06-gpu_Subcription.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: gpu-operator-certified + namespace: "{{ .Values.gpu_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "054" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.gpu_channel }}" + installPlanApproval: Automatic + name: gpu-operator-certified + source: certified-operators + sourceNamespace: openshift-marketplace diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/07-gpu_clusterpolicy.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/07-gpu_clusterpolicy.yaml new file mode 100644 index 000000000..3acdc0591 --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/07-gpu_clusterpolicy.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: nvidia.com/v1 +kind: ClusterPolicy +metadata: + name: gpu-cluster-policy + annotations: + argocd.argoproj.io/sync-wave: "055" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + driver: + repository: "{{ .Values.gpu_driver_repository_path }}" + image: driver + imagePullSecrets: [] + licensingConfig: + configMapName: '' + nlsEnabled: false + version: "{{ .Values.gpu_driver_version }}" + enabled: true + vgpuDeviceManager: + enabled: true + migManager: + enabled: true + operator: + defaultRuntime: crio + initContainer: {} + runtimeClass: nvidia + use_ocp_driver_toolkit: true + dcgm: + enabled: true + gfd: + enabled: true + dcgmExporter: + config: + name: '' + enabled: true + 
serviceMonitor: + enabled: true + devicePlugin: + config: + default: '' + name: '' + enabled: true + mig: + strategy: single + sandboxDevicePlugin: + enabled: true + validator: + plugin: + env: + - name: WITH_WORKLOAD + value: 'true' + nodeStatusExporter: + enabled: true + daemonsets: + rollingUpdate: + maxUnavailable: '1' + updateStrategy: RollingUpdate + sandboxWorkloads: + defaultWorkload: container + enabled: false + gds: + enabled: false + vgpuManager: + enabled: false + vfioManager: + enabled: true + toolkit: + enabled: true + installDir: /usr/local/nvidia diff --git a/cluster-applications/050-nvidia-gpu-operator/templates/08-ibm-mas-customscc.yaml b/cluster-applications/050-nvidia-gpu-operator/templates/08-ibm-mas-customscc.yaml new file mode 100644 index 000000000..cc23b7bce --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/templates/08-ibm-mas-customscc.yaml @@ -0,0 +1,64 @@ +--- +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: false +allowPrivilegedContainer: false +allowedCapabilities: +- CHOWN +- DAC_OVERRIDE +- FOWNER +- FSETID +- KILL +- SETGID +- SETUID +- SETPCAP +- NET_BIND_SERVICE +- NET_RAW +- SYS_CHROOT +allowedUnsafeSysctls: null +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: null +fsGroup: + type: MustRunAs + ranges: + - max: 65535 + min: 1 +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + argocd.argoproj.io/sync-wave: "056" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true + kubernetes.io/description: "This policy is the most restrictive for IBM Maximo Visual Inspection." 
+ name: ibm-mas-visualinspection-scc +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +readOnlyRootFilesystem: false +requiredDropCapabilities: +- ALL +runAsUser: + type: MustRunAsRange + uidRangeMax: 65535 + uidRangeMin: 0 +seLinuxContext: + type: RunAsAny +seccompProfiles: null +supplementalGroups: + type: MustRunAs + ranges: + - max: 65535 + min: 1 +users: [] +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret + diff --git a/cluster-applications/050-nvidia-gpu-operator/values.yaml b/cluster-applications/050-nvidia-gpu-operator/values.yaml new file mode 100644 index 000000000..f3abcfa8f --- /dev/null +++ b/cluster-applications/050-nvidia-gpu-operator/values.yaml @@ -0,0 +1,7 @@ +--- +gpu_namespace: "nvidia-gpu-operator" +gpu_channel: "v23.3" +nfd_namespace: "openshift-nfd" +nfd_channel: "stable" +gpu_driver_version: "525.105.17" +gpu_driver_repository_path: "nvcr.io/nvidia" diff --git a/cluster-applications/060-ibm-db2u/Chart.yaml b/cluster-applications/060-ibm-db2u/Chart.yaml new file mode 100644 index 000000000..71f45919b --- /dev/null +++ b/cluster-applications/060-ibm-db2u/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-db2u +description: IBM DB2U +type: application +version: 1.0.0 diff --git a/cluster-applications/060-ibm-db2u/README.md b/cluster-applications/060-ibm-db2u/README.md new file mode 100644 index 000000000..dcd6c2682 --- /dev/null +++ b/cluster-applications/060-ibm-db2u/README.md @@ -0,0 +1,3 @@ +IBM DB2U +=============================================================================== +Deploy and configure db2 operator with configurable version \ No newline at end of file diff --git a/cluster-applications/060-ibm-db2u/templates/00-db2_Namespace.yaml b/cluster-applications/060-ibm-db2u/templates/00-db2_Namespace.yaml new file mode 100644 index 000000000..ec9d15851 --- /dev/null +++ 
b/cluster-applications/060-ibm-db2u/templates/00-db2_Namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "060" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/cluster-applications/060-ibm-db2u/templates/01-db2-registry_Secret_0.yaml b/cluster-applications/060-ibm-db2u/templates/01-db2-registry_Secret_0.yaml new file mode 100644 index 000000000..a98e3abf4 --- /dev/null +++ b/cluster-applications/060-ibm-db2u/templates/01-db2-registry_Secret_0.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-registry + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "061" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: >- + {{ .Values.ibm_entitlement_key }} diff --git a/cluster-applications/060-ibm-db2u/templates/01-db2_OperatorGroup.yaml b/cluster-applications/060-ibm-db2u/templates/01-db2_OperatorGroup.yaml new file mode 100644 index 000000000..e0167d3e8 --- /dev/null +++ b/cluster-applications/060-ibm-db2u/templates/01-db2_OperatorGroup.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: ibm-mas-operator-group + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "061" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - "{{ .Values.db2_namespace }}" diff --git a/cluster-applications/060-ibm-db2u/templates/03-db2_Subscription.yaml b/cluster-applications/060-ibm-db2u/templates/03-db2_Subscription.yaml new file mode 100644 index 000000000..461db199e --- /dev/null +++ 
b/cluster-applications/060-ibm-db2u/templates/03-db2_Subscription.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-db2u-operator + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "062" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: {{ .Values.db2_channel }} + installPlanApproval: Automatic + name: db2u-operator + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace diff --git a/cluster-applications/060-ibm-db2u/templates/04-db2-ca_Issuer.yaml b/cluster-applications/060-ibm-db2u/templates/04-db2-ca_Issuer.yaml new file mode 100644 index 000000000..f6efda7b0 --- /dev/null +++ b/cluster-applications/060-ibm-db2u/templates/04-db2-ca_Issuer.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: "db2u-ca-issuer" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "064" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + selfSigned: {} diff --git a/cluster-applications/060-ibm-db2u/templates/05-db2-ca_Certificate.yaml b/cluster-applications/060-ibm-db2u/templates/05-db2-ca_Certificate.yaml new file mode 100644 index 000000000..07f64f922 --- /dev/null +++ b/cluster-applications/060-ibm-db2u/templates/05-db2-ca_Certificate.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "db2u-ca-certificate" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "064" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + secretName: "db2u-ca" + duration: "175200h0m0s" + renewBefore: "2160h0m0s" + issuerRef: + name: "db2u-ca-issuer" + kind: Issuer + isCA: true + usages: + - cert sign + - digital signature + - key encipherment 
+ - server auth + commonName: "ca.db2u" + subject: + countries: + - GB + streetAddresses: + - London + localities: + - London + organizationalUnits: + - IBM Maximo Application Suite DB2U diff --git a/cluster-applications/060-ibm-db2u/values.yaml b/cluster-applications/060-ibm-db2u/values.yaml new file mode 100644 index 000000000..c61a01c40 --- /dev/null +++ b/cluster-applications/060-ibm-db2u/values.yaml @@ -0,0 +1,4 @@ +--- +db2_namespace: "db2u" +ibm_entitlement_key: "" +db2_channel: "v110508.0" \ No newline at end of file diff --git a/docs/accountrootmanifest.md b/docs/accountrootmanifest.md new file mode 100644 index 000000000..ab70ceb2c --- /dev/null +++ b/docs/accountrootmanifest.md @@ -0,0 +1,85 @@ +Account Root Application Manifest +=============================================================================== + +The **Account Root Application** is created directly on the {{ management_cluster() }} running ArgoCD. It serves as the "entrypoint" to the MAS GitOps code and is where several key pieces of global configuration values are provided. + +### Template + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: root. + namespace: +spec: + destination: + namespace: + server: 'https://kubernetes.default.svc' + project: "" + source: + path: root-applications/ibm-mas-account-root + repoURL: + targetRevision: "" + helm: + values: | + account: + id: " + + generator: + repo_url: "" + revision: "" + + source: + repo_url: "" + revision: "" + + argo: + namespace: "" + projects: + rootapps: " + apps: "" + + avp: + name: "" + secret: "" + values_varname: "" + + syncPolicy: + syncOptions: + - CreateNamespace=false +``` + +### Parameters +#### `` +The URL of the Git repository containing the MAS GitOps Helm Charts, e.g. https://github.com/ibm-mas/gitops, aka the {{ source_repo() }} + +#### `` +The branch of `` to source the MAS GitOps Helm Charts from, e.g. `master`. 
+ +#### `` +The Git repository to source MAS cluster/instance configuration from, aka the {{ config_repo() }} + +#### `` +The revision of `` to source cluster/instance configuration from + +#### `` +The ID of the account this root application manages. This also determines the root folder in ``:`` +The namespace in which ArgoCD is installed on the {{ management_cluster() }}. E.g. `openshift-gitops`, `argocd-worker`. This determines where Application and ApplicationSet resources will be created. It will also be used to annotate namespaces created by our charts with [argocd.argoproj.io/managed-by](https://argocd-operator.readthedocs.io/en/stable/usage/deploy-to-different-namespaces/). + +#### `` +The ArgoCD project in which to create root applications (including this Application and the root applications that it generates). The project must be configured to permit creation of `argoproj.io.Application` and `argoproj.io.ApplicationSet` resources in the `` of the cluster in which ArgoCD is running (i.e. `https://kubernetes.default.svc`). + +#### `` +The ArgoCD project in which to create the applications that deploy MAS resources (and their dependencies) to external MAS clusters. The project must be configured to permit creation of any resource in any namespace of all external MAS clusters targeted by this account. + +#### `` +The name assigned to the ArgoCD Vault Plugin used for retrieving secrets. Defaults to `argocd-vault-plugin-helm`. + +#### `` +The name of the k8s secret containing the credentials for accessing the vault that AVP is linked with. Defaults to the empty string, which implies that these credentials have been configured already in the cluster. + +#### `` +The name of the environment variable used to pass values inline to AVP. Defaults to `HELM_VALUES`. 
+ diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 000000000..118e355de --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,15 @@ +Architecture +=============================================================================== + + + +MAS GitOps employs ArgoCD to deploy MAS instances to {{ target_clusters() }} using information from three sources: the {{ source_repo() }}, the {{ config_repo() }}, and the {{ secrets_vault() }}. + +- {{ source_repo() }}: A Git repository containing the MAS GitOps Helm Charts that define the Kubernetes resources needed for MAS deployments. +- {{ config_repo() }}: Contains YAML files with configuration values for rendering the Helm Charts, specifying the number, locations, and configurations of MAS deployments. +- {{ secrets_vault() }}: Stores sensitive values that should not be exposed in the {{ config_repo() }}. + +ArgoCD is installed and configured on some {{ management_cluster() }}. A single **Account Root Application** is registered with ArgoCD. This application monitors the {{ config_repo() }} and dynamically generates a hierarchy of applications that manage MAS deployments on the {{ target_clusters() }}. + +![Architecture](drawio/architecture.drawio) + diff --git a/docs/configrepo.md b/docs/configrepo.md new file mode 100644 index 000000000..50fd8e3ee --- /dev/null +++ b/docs/configrepo.md @@ -0,0 +1,71 @@ + +{{ config_repo() }} +=============================================================================== + +The {{ config_repo() }} represents the "source of truth" that (along with the Charts in the {{ source_repo() }} and the secrets in the {{ secrets_vault() }}) provides everything ArgoCD needs to install and manage MAS instances across {{ target_clusters() }}. + +It is structured as a hierarchy, with "accounts" at the top, followed by "clusters", followed by "instances". Each level contains different types of YAML configuration files. 
Each YAML configuration file will cause ArgoCD to generate one (or more) application(s), which in turn render Helm charts into the appropriate {{ target_cluster() }}. + +Here is the structure of an example {{ config_repo() }} containing configuration for three accounts (`dev`, `staging`, `production`) with a number of clusters and MAS instances: +``` +├── dev +│   ├── cluster1 +│   │   ├── instance1 +│   │   │   └── *.yaml +│   │   ├── instance2 +│   │   │   └── *.yaml +│   │   ├── instance3 +│   │   │   └── *.yaml +│   │   └── *.yaml +│   └── cluster2 +│   ├── *.yaml +│   └── instance1 +│   └── *.yaml +├── staging +│   └── cluster1 +│   ├── instance1 +│   │   └── *.yaml +│   ├── instance2 +│   │   └── *.yaml +│   └── *.yaml +└── production + └── cluster1 + ├── *.yaml + ├── instance1 + │   └── *.yaml + └── instance2 + └── *.yaml +``` + +The current set of YAML configuration files recognised by MAS GitOps at each level is as follows: + +``` +├── +│   └── +│   ├── +│   │   ├── ibm-db2u-databases.yaml +│   │   ├── ibm-mas-instance-base.yaml +│   │   ├── ibm-mas-masapp-assist-install.yaml +│   │   ├── ibm-mas-masapp-configs.yaml +│   │   ├── ibm-mas-masapp-iot-install.yaml +│   │   ├── ibm-mas-masapp-manage-install.yaml +│   │   ├── ibm-mas-masapp-monitor-install.yaml +│   │   ├── ibm-mas-masapp-optimizer-install.yaml +│   │   ├── ibm-mas-masapp-visualinspection-install.yaml +│   │   ├── ibm-mas-suite-configs.yaml +│   │   ├── ibm-mas-suite.yaml +│   │   ├── ibm-mas-workspaces.yaml +│   │   ├── ibm-sls.yaml +│   │   ├── ibm-cp4d.yaml +│   │   └── ibm-wsl.yaml +│   ├── ibm-db2u.yaml +│   ├── ibm-dro.yaml +│   ├── ibm-mas-cluster-base.yaml +│   ├── ibm-operator-catalog.yaml +│   ├── nvidia-gpu-operator.yaml +│   └── redhat-cert-manager.yaml +``` + +!!! info + See {{ gitops_repo_dir_link("example-config") }} for examples of each of these YAML files for a single account, cluster and MAS instance. 
+ diff --git a/docs/configtoinstances.md b/docs/configtoinstances.md new file mode 100644 index 000000000..3af3306b9 --- /dev/null +++ b/docs/configtoinstances.md @@ -0,0 +1,583 @@ +Mapping Config to MAS Deployments +=============================================================================== + +A combination of [ArgoCD Application Sets](https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/) and the [App of Apps pattern](https://argo-cd.readthedocs.io/en/stable/operator-manual/cluster-bootstrapping/#app-of-apps-pattern) is used by MAS GitOps to generate a tree of ArgoCD Applications that install and manage MAS instances in {{ target_clusters() }} based on the configuration files in the {{ config_repo() }}. + +The tree of Applications and Application Sets looks like this: + +![Application Structure](drawio/appstructure.drawio) + +The following describes *how* this tree is generated. + +The Account Root Application +------------------------------------------------------------------------------- + +It begins with the **Account Root Application**. This is created directly on the cluster running ArgoCD. It serves as the "entrypoint" to the MAS GitOps Helm Charts and is where several key pieces of global configuration values are provided. + +The manifest for the **Account Root Application** in our example is shown in the snippet below. The account ID, source repo, config (aka "generator") repo are configured here. 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: root.dev + namespace: openshift-gitops +spec: + destination: + namespace: openshift-gitops + server: 'https://kubernetes.default.svc' + project: "mas" + source: + path: root-applications/ibm-mas-account-root + repoURL: https://github.com/ibm-mas/gitops + targetRevision: master + helm: + values: | + account: + id: dev + + source: + repo_url: "https://github.com/ibm-mas/gitops" + revision: "mas" + + generator: + repo_url: "https://github.com/me/my-config-repo" + revision: "main" + + argo: + namespace: "openshift-gitops" +``` + +The **Account Root Application** establishes the {{ cluster_root_app_set() }}. + + + +The Cluster Root Application Set +------------------------------------------------------------------------------- +The {{ cluster_root_app_set() }} generates a set of **Cluster Root Applications** based on the configuration in the {{ config_repo() }}. + +The {{ cluster_root_app_set() }} employs an ArgoCD [Merge Generator](https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/Generators-Merge/) with a list of ArgoCD [Git File Generators](https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/Generators-Git/#git-generator-files). The Git File Generators monitor for named YAML configuration files at the cluster level in the {{ config_repo() }} and the Merge Generator combines each of these files into a single YAML object per MAS cluster. + +A simplified and abridged snippet showing the Merge and Git File generators from the {{ cluster_root_app_set() }} template is shown below: + + +```yaml +{% raw %}spec: + ... 
+ generators: + - merge: + mergeKeys: + - 'merge-key' + generators: + - git: + files: + - path: "{{ .Values.account.id }}/*/ibm-mas-cluster-base.yaml" + - git: + files: + - path: "{{ .Values.account.id }}/*/ibm-operator-catalog.yaml" + ...{% endraw %} +``` + +To illustrate, the following shows an example {{ config_repo() }} that defines a `dev` account containing configuration for two {{ target_clusters() }} (`cluster1` and `cluster2`). These are the files that the Git File Generators above are looking for. +```none +├── dev +│   ├── cluster1 +│   │   ├── ibm-mas-cluster-base.yaml +│   │   ├── ibm-operator-catalog.yaml +│   └── cluster2 +│   │   ├── ibm-mas-cluster-base.yaml +│   │   ├── ibm-operator-catalog.yaml +``` + + +Now let's take a look at the contents of these files: + +``` +├── dev +│   ├── cluster1 +| | |------------------------------------------- +│   │   ├── ibm-mas-cluster-base.yaml +| | |------------------------------------------- +| | | merge-key: "dev/cluster1" +| | | account: +| | | id: dev +| | | cluster: +| | | id: cluster1 +| | | url: https://api.cluster1.cakv.p3.openshiftapps.com:443 +| | | +| | |------------------------------------------- +│   │   ├── ibm-operator-catalog.yaml +| | |------------------------------------------- +| | | merge-key: "dev/cluster1" +| | | ibm_operator_catalog: +| | | mas_catalog_version: v8-240430-amd64 +| | | +│   └── cluster2 +| | |------------------------------------------- +│   │   ├── ibm-mas-cluster-base.yaml +| | |------------------------------------------- +| | | merge-key: "dev/cluster2" +| | | account: +| | | id: dev +| | | cluster: +| | | id: cluster2 +| | | url: https://api.cluster2.jsig.p3.openshiftapps.com:443 +| | | +| | |------------------------------------------- +│   │   ├── ibm-operator-catalog.yaml +| | |------------------------------------------- +| | | merge-key: "dev/cluster2" +| | | ibm_operator_catalog: +| | | mas_catalog_version: v8-240405-amd64 +``` + +All of the files contain a 
`merge-key` which includes the account ID and the cluster ID (e.g. `dev/cluster1`). This is used by the Merge generator to group together configuration into per-cluster YAML objects. + +The `ibm-mas-cluster-base.yaml` file contains global configuration for the cluster, including the `account.id`, and the `cluster.id` and the `cluster.url` which determines the {{ target_cluster() }} that ArgoCD will deploy resources to. + +The other YAML configuration files (such as `ibm-operator-catalog.yaml` shown above) represent one type of cluster-level resource that we wish to install on the {{ target_cluster() }}. + + +Given the config above, {{ cluster_root_app_set() }} generates two YAML objects: +```yaml + merge-key: "dev/cluster1" + account: + id: dev + cluster: + id: cluster1 + url: https://api.cluster1.cakv.p3.openshiftapps.com:443 + ibm_operator_catalog: + mas_catalog_version: v8-240430-amd64 +``` + +```yaml + merge-key: "dev/cluster2" + account: + id: dev + cluster: + id: cluster2 + url: https://api.cluster2.jsig.p3.openshiftapps.com:443 + ibm_operator_catalog: + mas_catalog_version: v8-240405-amd64 +``` + +The generated YAML objects are used to render the template defined in the {{ cluster_root_app_set() }} to generate **Cluster Root Applications** in the {{ management_cluster() }}. + +- [Go Template](https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/GoTemplate/) expressions are used to inject **cluster-specific** configuration from the cluster's YAML object into the template (e.g. `{% raw %}{{.cluster.id}}{% endraw %}`). + +- Global configuration that applies to all clusters is passed down from the Helm values used to render the {{ cluster_root_app_set() }} template (e.g. `{% raw %}{{ .Values.source.repo_url }}{% endraw %}`). 
+ +A simplified and abridged snippet of the {{ cluster_root_app_set() }} template is shown below, followed by a breakdown of the purpose of each section: + +```yaml + {% raw %}template: + metadata: + name: "cluster.{{ `{{.cluster.id}}` }}" + ... + spec: + source: + path: root-applications/ibm-mas-cluster-root + helm: + values: "{{ `{{ toYaml . }}` }}" + + parameters: + - name: "source.repo_url" + value: "{{ .Values.source.repo_url }}" + - name: "argo.namespace" + value: "{{ .Values.argo.namespace }}" + + destination: + server: 'https://kubernetes.default.svc' + namespace: {{ .Values.argo.namespace }}{% endraw %} +``` + +!!! info "What are the backticks for?" + + Since the **Cluster Root Application Set** is itself a Helm template (rendered by the **Account Root Application**) we need to tell Helm to not attempt to parse the Go Template expressions, treating them as literals instead. This is achieved by wrapping the Go Template expressions in backticks. The expressions in the snippet above will be rendered by Helm as `{% raw %}"cluster.{{.cluster.id}}"{% endraw %}` and `{% raw %}"{{ toYaml . }}"{% endraw %}`. + + +The **Cluster Root Applications** are named according to their ID: +```yaml +template: + metadata: + {% raw %}name: "cluster.{{ `{{.cluster.id}}` }}"{% endraw %} +``` + +**Cluster Root Applications** render the {{ cluster_root_chart() }}: +```yaml + {% raw %}source: + path: root-applications/ibm-mas-cluster-root{% endraw %} +``` + + +The entire cluster's YAML object is passed in as Helm values to the {{ cluster_root_chart() }}: +```yaml + {% raw %}helm: + values: "{{ `{{ toYaml . 
}}` }}"{% endraw %} +``` + +Additional global configuration parameters (such as details of the {{ source_repo() }} and the namespace where ArgoCD is running) set on the **Account Root Application** are passed down as additional Helm parameters: +```yaml + {% raw %}parameters: + - name: "source.repo_url" + value: "{{ .Values.source.repo_url }}" + - name: "argo.namespace" + value: "{{ .Values.argo.namespace }}"{% endraw %} +``` + + +**Cluster Root Applications** are created in the ArgoCD namespace on the {{ management_cluster() }}: +```yaml + {%raw %}destination: + server: 'https://kubernetes.default.svc' + namespace: {{ .Values.argo.namespace }}{% endraw %} +``` + + +Given the config above, two **Cluster Root Applications** are generated: + +```yaml +kind: Application +metadata: + name: cluster.cluster1 +spec: + source: + path: root-applications/ibm-mas-cluster-root + helm: + values: |- + merge-key: dev/cluster1 + account: + id: dev + cluster: + id: cluster1 + url: https://api.cluster1.cakv.p3.openshiftapps.com:443 + ibm_operator_catalog: + mas_catalog_version: v8-240430-amd64 + parameters: + - name: source.repo_url + value: "https://github.com/..." + - name: argo.namespace + value: "openshift-gitops" + destination: + server: 'https://kubernetes.default.svc' + namespace: openshift-gitops +``` +```yaml +kind: Application +metadata: + name: cluster.cluster2 +spec: + source: + path: root-applications/ibm-mas-cluster-root + helm: + values: |- + merge-key: dev/cluster2 + account: + id: dev + cluster: + id: cluster2 + url: https://api.cluster2.jsig.p3.openshiftapps.com:443 + ibm_operator_catalog: + mas_catalog_version: v8-240405-amd64 + parameters: + - name: source.repo_url + value: "https://github.com/..." 
+ - name: argo.namespace + value: "openshift-gitops" + destination: + server: 'https://kubernetes.default.svc' + namespace: openshift-gitops +``` + + + +The Cluster Root Application +------------------------------------------------------------------------------- + +**Cluster Root Applications** render the {{ cluster_root_chart() }} into the ArgoCD namespace of the {{ management_cluster() }}. + +The {{ cluster_root_chart() }} contains templates to conditionally render ArgoCD Applications that deploy cluster-wide resources to {{ target_clusters() }} once the configuration for those resources is present in the {{ config_repo() }}. + +Application-specific configuration is held under a unique top-level field. For example, the `ibm_operator_catalog` field in our example above holds all configuration for the {{ gitops_repo_dir_link("cluster-applications/000-ibm-operator-catalog", "000-ibm-operator-catalog chart") }}. The {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/000-ibm-operator-catalog-app.yaml", "000-ibm-operator-catalog-app template") }} that renders this chart is guarded by: +```yaml +{% raw %} +{{- if not (empty .Values.ibm_operator_catalog) }} +{% endraw %} +``` +Continuing with our example, because `ibm_operator_catalog` is present in the Helm values for both **Cluster Root Applications**, both will render the {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/000-ibm-operator-catalog-app.yaml", "000-ibm-operator-catalog-app template") }} into the respective {{ target_cluster() }}. 
+ +A simplified and abridged snippet of the {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/000-ibm-operator-catalog-app.yaml", "000-ibm-operator-catalog-app template") }} is shown below, followed by a breakdown of the purpose of each section: + +```yaml +{% raw %} +kind: Application +metadata: + name: operator-catalog.{{ .Values.cluster.id }} +spec: + source: + path: cluster-applications/000-ibm-operator-catalog + plugin: + name: argocd-vault-plugin-helm + env: + - name: HELM_VALUES + value: | + mas_catalog_version: "{{ .Values.ibm_operator_catalog.mas_catalog_version }}" + destination: + server: {{ .Values.cluster.url }} +{% endraw %} +``` + +The template generates an **Operator Catalog Application** named according to its type (`operator-catalog`) and includes the cluster ID: +```yaml +{% raw %}kind: Application +metadata: + name: operator-catalog.{{ .Values.cluster.id }}{% endraw %} +``` + +The **Operator Catalog Application** renders the {{ gitops_repo_dir_link("cluster-applications/000-ibm-operator-catalog", "000-ibm-operator-catalog chart") }}: +```yaml +{% raw %}spec: + source: + path: cluster-applications/000-ibm-operator-catalog{% endraw %} +``` + +Values are mapped from those in the **Cluster Root Application** manifest into the form expected by the {{ gitops_repo_dir_link("cluster-applications/000-ibm-operator-catalog", "000-ibm-operator-catalog chart") }}. + +```yaml + {% raw %}plugin: + name: argocd-vault-plugin-helm + env: + - name: HELM_VALUES + value: | + mas_catalog_version: "{{ .Values.ibm_operator_catalog.mas_catalog_version }}"{% endraw %} +``` + + +!!! 
info
+    Some of these values (not shown here) will be [inline-path placeholders](https://argocd-vault-plugin.readthedocs.io/en/stable/howitworks/#inline-path-placeholders) for referencing secrets in the **Secrets Vault**, so we pass the values in via the AVP plugin source (rather than the `helm` source):
+
+
+Finally, the resources in the {{ gitops_repo_dir_link("cluster-applications/000-ibm-operator-catalog", "000-ibm-operator-catalog chart") }} should be created on the {{ target_cluster() }} in order to install the IBM operator catalog there:
+```yaml
+  {% raw %}destination:
+    server: {{ .Values.cluster.url }}{% endraw %}
+```
+
+
+For our example configuration, two **Operator Catalog Applications** will be generated:
+
+```yaml
+kind: Application
+metadata:
+  name: operator-catalog.cluster1
+spec:
+  destination:
+    server: https://api.cluster1.cakv.p3.openshiftapps.com:443
+  source:
+    path: cluster-applications/000-ibm-operator-catalog
+    plugin:
+      name: argocd-vault-plugin-helm
+      env:
+        - name: HELM_VALUES
+          value: |
+            mas_catalog_version: "v8-240430-amd64"
+```
+
+```yaml
+kind: Application
+metadata:
+  name: operator-catalog.cluster2
+spec:
+  destination:
+    server: https://api.cluster2.jsig.p3.openshiftapps.com:443
+  source:
+    path: cluster-applications/000-ibm-operator-catalog
+    plugin:
+      name: argocd-vault-plugin-helm
+      env:
+        - name: HELM_VALUES
+          value: |
+            mas_catalog_version: "v8-240405-amd64"
+```
+
+
+The other Application templates in the {{ cluster_root_chart() }} (e.g. {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/010-ibm-redhat-cert-manager-app.yaml", "010-ibm-redhat-cert-manager-app.yaml") }}, {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/020-ibm-dro-app.yaml", "020-ibm-dro-app.yaml") }} and so on) all follow this pattern and work in a similar way.
+ +The {{ cluster_root_chart() }} also includes the {{ instance_root_app_set() }} template which generates a new **Instance Root Application Set** for each cluster. + +The Instance Root Application Set +------------------------------------------------------------------------------- + +The {{ instance_root_app_set() }} generates a set of **Instance Root Applications** based on the configuration in the {{ config_repo() }}. It follows the same pattern as the {{ cluster_root_app_set() }} as described [above](#the-cluster-root-application-set). + +The key differences are: + +- `merge-keys` in the instance-level configuration YAML files also contain a MAS instance ID, e.g. `dev/cluster1/instance1`. +- The generated **Instance Root Applications** source the {{ gitops_repo_dir_link("root-applications/ibm-mas-instance-root", "ibm-mas-instance-root Chart") }}. +- The Git File Generators look for a different set of named YAML files at the **instance** level in the {{ config_repo() }}: + + +A simplified and abridged snippet showing the Merge and Git File generators from the {{ instance_root_app_set() }} template is shown below: + +```yaml +{% raw %}spec: + ... + generators: + - merge: + mergeKeys: + - 'merge-key' + generators: + - git: + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-instance-base.yaml" + - git: + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-suite.yaml"{% endraw %} +``` + +Continuing with our example, let's add some additional instance-level config files to the {{ config_repo() }} (only showing `cluster1` this time for brevity). These are the files that the Git File Generators above are looking for. 
+ +``` +├── dev +│ ├── cluster1 +│ │ ├── ibm-mas-cluster-base.yaml +│ │ ├── ibm-operator-catalog.yaml +│   | ├── instance1 +│   | │   ├── ibm-mas-instance-base.yaml +│   | │   ├── ibm-mas-suite.yaml +``` + +Now let's take a look at the contents of the new instance-level files: + +``` +├── dev +│   ├── cluster1 +│   │   ├── ibm-mas-cluster-base.yaml +│   │   ├── ibm-operator-catalog.yaml +│   | ├── instance1 +| | | |------------------------------------------- +│   | │   ├── ibm-mas-instance-base.yaml +| | | |------------------------------------------- +| | | | merge-key: "dev/cluster1/instance1" +| | | | account: +| | | | id: dev +| | | | cluster: +| | | | id: cluster1 +| | | | url: https://api.cluster1.cakv.p3.openshiftapps.com:443 +| | | | instance: +| | | | id: instance1 +| | | | +| | | |------------------------------------------- +│   | │   ├── ibm-mas-suite.yaml +| | | |------------------------------------------- +| | | | merge-key: "dev/cluster1/instance1" +| | | | ibm_mas_suite: +| | | | mas_channel: "8.11.x" +... +``` + +As with the cluster-level config, all files contain the `merge-key`, but this times it also includes the MAS instance ID. This is used by the Merge generator to group together configuration into per-instance YAML objects for each {{ target_cluster() }}. + +The `ibm-mas-instance-base.yaml` file contains global configuration for the instance on the {{ target_cluster() }}, including the `account.id`, and the `cluster.id`, the `cluster.url` and the `instance.id`. + +The other YAML configuration files (such as `ibm-mas-suite.yaml` shown above) represent one type of instance-level resource that we wish to install on the {{ target_cluster() }}. 
+
+Given the config above, the {{ instance_root_app_set() }} would generate one YAML object:
+```yaml
+merge-key: "dev/cluster1/instance1"
+account:
+  id: dev
+cluster:
+  id: cluster1
+  url: https://api.cluster1.cakv.p3.openshiftapps.com:443
+instance:
+  id: instance1
+ibm_mas_suite:
+  mas_channel: "8.11.x"
+```
+
+Following the same pattern used in the {{ cluster_root_app_set() }} as described [above](#the-cluster-root-application-set), the YAML object is used to render the **Instance Root Application Set** template, generating an **Instance Root Application**:
+```yaml
+kind: Application
+metadata:
+  name: instance.cluster1.instance1
+spec:
+  source:
+    path: root-applications/ibm-mas-instance-root
+    helm:
+      values: |-
+        merge-key: dev/cluster1/instance1
+        account:
+          id: dev
+        cluster:
+          id: cluster1
+          url: https://api.cluster1.cakv.p3.openshiftapps.com:443
+        instance:
+          id: instance1
+        ibm_mas_suite:
+          mas_channel: "8.11.x"
+    parameters:
+      - name: source.repo_url
+        value: "https://github.com/..."
+      - name: argo.namespace
+        value: "openshift-gitops"
+  destination:
+    server: 'https://kubernetes.default.svc'
+    namespace: openshift-gitops
+```
+
+
+
+The Instance Root Application
+-------------------------------------------------------------------------------
+
+**Instance Root Applications** render the {{ instance_root_chart() }} into the ArgoCD namespace of the {{ management_cluster() }}.
+
+The {{ instance_root_chart() }} contains templates to conditionally render ArgoCD Applications that deploy MAS instances to **Target Clusters** once the configuration for the ArgoCD Application is present in the {{ config_repo() }}.
+
+It follows the same pattern as the **Cluster Root Application** described [above](#the-cluster-root-application); specific applications are enabled once their configuration is pushed to the {{ config_repo() }}. 
For instance, the {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml", "130-ibm-mas-suite-app.yaml template") }} generates an Application that deploys the MAS `Suite` CR to the target cluster once configuration under the `ibm_mas_suite` key is present. + +Some special templates are capable of generating multiple applications: + +- {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/120-db2-databases-app.yaml", "120-db2-databases-app.yaml") }} +- {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml", "130-ibm-mas-suite-configs-app.yaml") }} +- {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml", "200-ibm-mas-workspaces.yaml") }} +- {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs", "510-550-ibm-mas-masapp-configs") }} + +These are used when there can be more than one instance of the *type* of resource that these Applications are responsible for managing. + +For example, MAS instances may require more than one DB2 Database. To accommodate this, we make use of the Helm `range` control structure to iterate over a list in YAML configuration files in the {{ config_repo()}}. + +For instance, the `ibm-db2u-databases.yaml` configuration file contains: +```yaml +ibm_db2u_databases: + - mas_application_id: iot + db2_memory_limits: 12Gi + ... + - mas_application_id: manage + db2_memory_limits: 16Gi + db2_database_db_config: + CHNGPGS_THRESH: '40' + ... + ... 
+``` + +The {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/120-db2-databases-app.yaml", "120-db2-databases-app.yaml template") }} iterates over this list to generate multiple DB2 Database Applications configured as needed: + +```yaml +{% raw %} +{{- range $i, $value := .Values.ibm_db2u_databases }} +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "db2-db.{{ $.Values.cluster.id }}.{{ $.Values.instance.id }}.{{ $value.mas_application_id }}" +... +{{- end}} +{% endraw %} +``` + + +!!! info "Why not use ApplicationSets here?" + + We encountered some limitations when using ApplicationSets for this purpose. For instance, Applications generated by ApplicationSets do not participate in the [ArgoCD syncwave](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-waves/) with other Applications so we would have no way of ensuring that resources would be configured in the correct order. By using the Helm `range` control structure we generate "normal" Applications that do not suffer from this limitation. This means, for instance, that we can ensure that DB2 Databases are configured **before** attempting to provide the corresponding JDBC configuration to MAS. 
diff --git a/docs/drawio/appstructure.drawio b/docs/drawio/appstructure.drawio new file mode 100644 index 000000000..63fa11298 --- /dev/null +++ b/docs/drawio/appstructure.drawio @@ -0,0 +1,235 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/drawio/architecture.drawio b/docs/drawio/architecture.drawio new file mode 100644 index 000000000..292633918 --- /dev/null +++ b/docs/drawio/architecture.drawio @@ -0,0 +1,554 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
\ No newline at end of file diff --git a/docs/drawio/mcsp-argocd-mas-layout.drawio b/docs/drawio/mcsp-argocd-mas-layout.drawio new file mode 100644 index 000000000..a31048318 --- /dev/null +++ b/docs/drawio/mcsp-argocd-mas-layout.drawio @@ -0,0 +1,139 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/helmcharts.md b/docs/helmcharts.md new file mode 100644 index 000000000..545818425 --- /dev/null +++ b/docs/helmcharts.md @@ -0,0 +1,86 @@ +{{ source_repo() }} +=============================================================================== + +The {{ source_repo() }} (usually [ibm-mas/gitops](https://github.com/ibm-mas/gitops)) provides Helm Charts that define all of the Kubernetes resources required to deploy MAS instances using ArgoCD. The Helm Charts are split across three sub directories, depending on their intended target: + +- **{{ gitops_repo_dir_link("root-applications") }}**: these charts define ArgoCD Application and ApplicationSet templates following the [App of Apps pattern](https://argo-cd.readthedocs.io/en/stable/operator-manual/cluster-bootstrapping/#app-of-apps-pattern) and target the {{ management_cluster() }} where ArgoCD is running. The Applications and ApplicationSets render other charts from {{ gitops_repo_dir_link("root-applications") }}, {{ gitops_repo_dir_link("cluster-applications") }} or {{ gitops_repo_dir_link("instance-applications") }} +- **{{ gitops_repo_dir_link("cluster-applications") }}**: these charts define Kubernetes resources for installing cluster-wide MAS pre-requisites on {{ target_clusters() }} where MAS is to be installed and managed. 
+- **{{ gitops_repo_dir_link("instance-applications") }}**: these charts define Kubernetes resources for installing and managing MAS instances on {{ target_clusters() }}. + + +The following figure shows a tree of ArgoCD applications and Application Sets generated by the charts under **{{ gitops_repo_dir_link("root-applications") }}**, starting with the **Account Root Application** at the top. + +![Application Structure](drawio/appstructure.drawio) + + +## {{ account_root_chart() }} + +**Account Root Applications** render the {{ account_root_chart() }}, which installs the {{ cluster_root_app_set() }}. This generates a set of **Cluster Root Applications** based on the configuration in the {{ config_repo() }} + + +## {{ cluster_root_chart() }} + +**Cluster Root Applications** render the {{ cluster_root_chart() }}, which contains templates to generate ArgoCD Applications for configuring various dependencies shared by MAS instances on {{ target_clusters() }}. + +| Application | Template | Helm Chart | +|-|-|-| +|IBM Operator Catalog | {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/000-ibm-operator-catalog-app.yaml", "000-ibm-operator-catalog-app.yaml") }} | {{ gitops_repo_dir_link("cluster-applications/000-ibm-operator-catalog", "000-ibm-operator-catalog") }} | +|Redhat Certificate Manager | {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/010-ibm-redhat-cert-manager-app.yaml", "010-ibm-redhat-cert-manager-app.yaml") }} | {{ gitops_repo_dir_link("cluster-applications/010-redhat-cert-manager", "010-redhat-cert-manager") }} | +|Data Reporter Operator (DRO) | {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/020-ibm-dro-app.yaml", "020-ibm-dro-app.yaml") }} | {{ gitops_repo_dir_link("cluster-applications/020-ibm-dro", "020-ibm-dro") }} | +|Db2u Operator | {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/060-ibm-db2u-app.yaml", "060-ibm-db2u-app.yaml") }} | {{ 
gitops_repo_dir_link("cluster-applications/060-ibm-db2u", "060-ibm-db2u") }} | +|CIS Compliance | {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/040-cis-compliance-app.yaml", "040-cis-compliance-app.yaml") }} | {{ gitops_repo_dir_link("cluster-applications/040-cis-compliance", "040-cis-compliance") }} | +|Nvidia GPU Operator | {{ gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/050-nvidia-gpu-operator-app.yaml", "050-nvidia-gpu-operator-app.yaml") }} | {{ gitops_repo_dir_link("cluster-applications/050-nvidia-gpu-operator", "050-nvidia-gpu-operator") }} | + +The {{ cluster_root_chart() }} also installs the {{ instance_root_app_set() }}. This generates a set of **Instance Root Applications** based on the configuration in the {{ config_repo() }}. + +## {{ instance_root_chart() }} + +**Instance Root Applications** render the {{ instance_root_chart() }}, which contains templates to generate ArgoCD Applications that installs MAS on {{ target_clusters() }}, including: + +##### Instance-level MAS dependencies: + +| Application | Template | Helm Chart | +|-|-|-| +|Cloudpak 4 Data (CP4D) | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/080-ibm-cp4d-app.yaml", "080-ibm-cp4d-app.yaml") }} | {{ gitops_repo_dir_link("instance-applications/080-ibm-cp4d", "080-ibm-cp4d") }} | +|Suite License Service (SLS) | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml", "100-ibm-sls-app.yaml") }} | {{ gitops_repo_dir_link("instance-applications/100-ibm-sls", "100-ibm-sls") }} +|DB2 Databases | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/120-db2-databases-app.yaml", "120-db2-databases-app.yaml") }}[^1] | {{ gitops_repo_dir_link("instance-applications/120-ibm-db2u-database", "120-ibm-db2u-database") }} | +|Watson Studio Local (WSL) | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/090-ibm-wsl-app.yaml", 
"090-ibm-wsl-app.yaml") }} | {{ gitops_repo_dir_link("instance-applications/090-ibm-wsl", "090-ibm-wsl") }} | + + +##### MAS Core Platform + +This includes the suite, suite configurations and core workspaces + +| Application | Template | Helm Chart | +|-|-|-| +|MAS Core Suite | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml", "130-ibm-mas-suite-app.yaml") }} | {{ gitops_repo_dir_link("instance-applications/130-ibm-mas-suite", "130-ibm-mas-suite") }} | +|MAS Workspaces | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml", "200-ibm-mas-workspaces.yaml") }}[^1] | {{ gitops_repo_dir_link("instance-applications/220-ibm-mas-workspace", "220-ibm-mas-workspace") }} | +|Suite Configs | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml", "130-ibm-mas-suite-configs-app.yaml") }}[^1] | Multiple charts [^2] | + + +#### MAS Applications + +These share a {{ gitops_repo_dir_link("instance-applications/500-540-ibm-mas-suite-app-install", "generic chart") }}. 
+ +| Application | Template | +|-|-| +|Assist | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-assist-install.yaml", "500-ibm-mas-masapp-assist-install.yaml") }} | +|IoT | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-iot-install.yaml", "500-ibm-mas-masapp-iot-install.yaml") }} | +|Manage | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml", "500-ibm-mas-masapp-manage-install.yaml") }} | +|VisualInspection | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-visualinspection-install.yaml", "500-ibm-mas-masapp-visualinspection-install.yaml") }} | +|Health | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml", "520-ibm-mas-masapp-health-install.yaml") }} | +|Monitor | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml", "520-ibm-mas-masapp-monitor-install.yaml") }} | +|Optimizer | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml", "520-ibm-mas-masapp-optimizer-install.yaml") }} | +|Predict | {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml", "540-ibm-mas-masapp-predict-install.yaml") }} | + +#### MAS Application Workspace Config + +The generic {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml", "510-550-ibm-mas-masapp-configs.yaml") }}[^1] template and {{ gitops_repo_dir_link("instance-applications/510-550-ibm-mas-suite-app-config", "510-550-ibm-mas-suite-app-config") }} chart is used for creating MAS Application workspaces. 
+ + + +[^1]: These templates are capable of generating multiple Applications; necessary because there may be one or more instances of the **type** of resource they are responsible for managing. +[^2]: + The {{ gitops_repo_file_link("root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml", "Suite Configs") }} Application is responsible for installing various types of suite configuration types (Mongo, BAS, SMTP, etc) at various scopes (`system`, `app`, `ws`, `wsapp`). The chart is chosen dynamically based on the configuration type: {{ gitops_repo_dir_link("instance-applications/130-ibm-jdbc-config", "JDBC") }}, {{ gitops_repo_dir_link("instance-applications/130-ibm-kafka-config", "Kafka") }}, {{ gitops_repo_dir_link("instance-applications/130-ibm-mas-bas-config", "BAS") }}, {{ gitops_repo_dir_link("instance-applications/130-ibm-mas-idp-config", "IDP") }}, {{ gitops_repo_dir_link("instance-applications/130-ibm-mas-mongo-config", "Mongo") }}, {{ gitops_repo_dir_link("instance-applications/130-ibm-mas-sls-config", "SLS") }}, {{ gitops_repo_dir_link("instance-applications/130-ibm-mas-smtp-config", "SMTP") }}, {{ gitops_repo_dir_link("instance-applications/130-ibm-objectstorage-config", "COS") }} + + diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..f75a17ec3 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,7 @@ +IBM Maximo Application Suite - GitOps +=============================================================================== + +A GitOps approach to managing Maximo Application Suite. + +!!! note + This documentation is not yet ready to provide an exhaustive and comprehensive guide to using MAS GitOps for your own deployments. Instead, it is intended to promote understanding of the concepts involved and the approach we have taken. If you would like to deploy MAS via GitOps, please refer to the [ibm-mas/gitops-demo](https://github.com/ibm-mas/gitops-demo/tree/002) for a step-by-step guide using the MAS CLI. 
diff --git a/docs/limitations.md b/docs/limitations.md
new file mode 100644
index 000000000..54e9cf32d
--- /dev/null
+++ b/docs/limitations.md
@@ -0,0 +1,11 @@
+Known Limitations
+===============================================================================
+
+**A single ArgoCD instance cannot manage more than one Account Root Application.** This is primarily due to a limitation we have inherited to be compatible with internal IBM systems where we must have everything under a single ArgoCD project. This limitation could be addressed by adding support for multi-project configurations, assigning each **Account Root Application** its own project in ArgoCD. This is something we'd like to do in the long term but it's not a priority at the moment.
+
+
+**MAS GitOps only supports AWS Secrets Manager at present.** Support for other backends will be added in future releases.
+
+Any modifications made via the MAS admin UI or REST API that result in modifications to existing K8S resources will be undone by ArgoCD. We plan to provide the option in MAS to disable these UI/REST APIs when being managed by GitOps.
+
+MAS GitOps only supports the definition of `system` scope for all MAS configuration types (other than `JDBC` which supports all scopes: `system`, `ws`, `app` and `wsapp`).
\ No newline at end of file
diff --git a/docs/orchestration.md b/docs/orchestration.md
new file mode 100644
index 000000000..d5ef83ea4
--- /dev/null
+++ b/docs/orchestration.md
@@ -0,0 +1,87 @@
+Deployment Orchestration
+===============================================================================
+
+The MAS GitOps Helm Charts have been developed with the aim of simplifying the orchestration of MAS deployments as much as possible.
+Once a {{ target_cluster() }} has been provisioned and registered with the ArgoCD instance running in the {{ management_cluster() }}, MAS instances can be deployed and managed on that {{ target_cluster() }} solely by registering secrets in the {{ secrets_vault() }} and pushing configuration files to the {{ config_repo() }}. There is no need to run any commands against ArgoCD or the {{ target_cluster() }} to initiate or control synchronization.
+
+This is achieved using a combination of the following ArgoCD mechanisms:
+
+ - [Automated Sync Policies](https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync/#automated-sync-policy)
+ - [Sync Waves](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-waves/)
+ - [Custom Resource Healthchecks](https://argo-cd.readthedocs.io/en/stable/operator-manual/health/#custom-health-checks)
+ - [Resource Hooks](https://argo-cd.readthedocs.io/en/stable/user-guide/resource_hooks/)
+
+
+Automated Sync Policies
+-------------------------------------------------------------------------------
+
+The ArgoCD Application Set git generators poll the {{ config_repo() }} every three minutes and will automatically pick up configuration files pushed to the {{ config_repo() }}.
+
+!!! tip
+
+    If needed, ArgoCD can be configured to [receive webhook events](https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/Generators-Git/#webhook-configuration) to eliminate the inherent delay introduced by the default polling behaviour.
+
+The resulting MAS GitOps Applications will be automatically synced as they have an automated sync policy:
+```yaml
+syncPolicy:
+  automated:
+    selfHeal: true
+    prune: true
+```
+In addition:
+
+- [`selfHeal: true`](https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync/#automatic-self-healing): causes ArgoCD to trigger a sync if changes are made to an ArgoCD-managed resource in the live cluster by something other than ArgoCD (e.g. a human operator). 
This forces any updates to MAS configuration to be made by pushing a commit to the {{ config_repo() }}, ensuring that the configuration in the {{ config_repo() }} is always the "source of truth". +- [`prune: true`](https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync/#automatic-pruning): this allows ArgoCD to automatically deprovision MAS resources when their corresponding configuration files are deleted from the {{ config_repo() }}. + +!!! info + + We may make `prune` configurable on a per-account basis in future releases. `prune: true` is useful in development systems as it allows MAS instances to be deprovisioned with no manual intervention. This may be too risky for use in production systems though and `prune: false` may be necessary; meaning a request must be made to ArgoCD after configuration files are deleted to explicitly perform a sync with pruning enabled. + +Sync Waves +------------------------------------------------------------------------------- + +All Kubernetes resources defined in the MAS GitOps Helm Charts are annotated with an ArgoCD [sync wave](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-waves/). This ensures that resources (including generated ArgoCD Applications on the {{ management_cluster() }} and Kubernetes resources on {{ target_cluster() }}s) are synced in the correct order. + +!!! note + + For clarity, all resource filenames are prefixed with the sync wave that they belong to. + +!!! note + + Sync waves are *local* to each ArgoCD application (i.e. each Helm chart). + +Custom Resource Healthchecks +------------------------------------------------------------------------------- + +MAS GitOps requires a set of [Custom Resource Healthchecks](https://argo-cd.readthedocs.io/en/stable/operator-manual/health/#custom-health-checks) to be registered with the ArgoCD in the {{ management_cluster() }}. + +This allows ArgoCD to properly interpret and report the health status of the various custom resources used by MAS. 
This is a crucial part of ensuring that resources have finished reconciling before allowing subsequent sync waves (which may contain dependent resources) to proceed.
+
+The set of Custom Resource Healthchecks required by MAS GitOps can be found in the [ibm-mas/cli project](https://github.com/ibm-mas/cli/blob/45cc815ec6244c9d58e050900ec0e27403d9ea92/image/cli/mascli/templates/gitops/bootstrap/argocd.yaml#L83).
+
+
+Resource Hooks
+-------------------------------------------------------------------------------
+
+Configuration tasks have to be performed at various points during the MAS synchronization procedure. We achieve this via the use of ArgoCD [Resource Hooks](https://argo-cd.readthedocs.io/en/stable/user-guide/resource_hooks/).
+
+
+#### PreSync Hooks
+Tasks that must be performed **before** an Application begins syncing are defined as `PreSync` hooks. These are used, for example, to verify that cluster CRDs are present before proceeding with an installation (e.g. {{ gitops_repo_file_link("instance-applications/120-ibm-db2u-database/templates/00-presync-await-crd_Job.yaml", "00-presync-await-crd_Job") }}).
+
+
+#### "PostSync" Hooks
+Tasks that must be performed **after** an Application finishes syncing (but **before** it can report `Healthy`) are performed by Kubernetes Jobs in the final sync wave of the Application.
+
+Jobs of this kind typically perform some post-install configuration (e.g. {{ gitops_repo_file_link("instance-applications/120-ibm-db2u-database/templates/05-postsync-setup-db2_Job.yaml", "05-postsync-setup-db2_Job") }}) and/or register some runtime-generated information as a secret in the {{ secrets_vault() }} for use by downstream applications (e.g. {{ gitops_repo_file_link("cluster-applications/020-ibm-dro/templates/08-postsync-update-sm_Job.yaml", "08-postsync-update-sm_Job") }}).
+
+
+!!! info
+
+    You may notice that we do not actually use the `PostSync` ArgoCD annotation on many of these Jobs. 
This is because the completion status of Jobs annotated as `PostSync` is not taken into account when computing the overall health status of an application. Since the tasks we perform are typically required steps that must be performed before downstream applications in later sync waves are allowed to sync, we instead use "ordinary" Kubernetes Jobs. Since the health status of "ordinary" Kubernetes Jobs **is** taken into account, subsequent sync waves will not be allowed to start until the Job has completed successfully. + + + +### PostDelete Hooks + +Tasks that must be performed to ensure an orderly teardown of resources when configuration files are deleted from the {{ config_repo() }}. For example, Suite Config CRs (e.g. `MongoCfg`) cannot be pruned by ArgoCD since they are assigned the `Suite` as an owner during reconciliation. To work around this, we use PostDelete hooks to issue `oc delete` commands (e.g. {{ gitops_repo_file_link("instance-applications/130-ibm-mas-mongo-config/templates/postdelete-delete-cr.yaml", "postdelete-delete-cr") }}). \ No newline at end of file diff --git a/docs/secrets.md b/docs/secrets.md new file mode 100644 index 000000000..b4b446fb8 --- /dev/null +++ b/docs/secrets.md @@ -0,0 +1,22 @@ +The {{ secrets_vault() }} +=============================================================================== + +Sensitive values that should not be exposed in the {{ config_repo() }} are stored as secrets in the {{ secrets_vault() }}. Secrets are fetched at runtime using the [ArgoCD Vault Plugin](https://argocd-vault-plugin.readthedocs.io/en/stable/) from some backend implementation (e.g. [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/)). + +Secrets are referenced in the YAML configuration files in the {{ config_repo() }} as inline-path placeholders. For example: +```yaml +ibm_entitlement_key: "" +``` + +These are referenced in Helm Chart templates, e.g. 
{{ gitops_repo_file_link("cluster-applications/000-ibm-operator-catalog/templates/02-ibm-entitlement_Secret.yaml", "02-ibm-entitlement_Secret" ) }}: +```yaml +data: + .dockerconfigjson: >- + {% raw %}{{ .Values.ibm_entitlement_key }}{% endraw %} +``` + +During rendering of the Helm Chart, the ArgoCD Vault Plugin will fetch the secret value from the {{ secrets_vault() }} at runtime and substitute it into the template. + +!!! info + MAS GitOps only supports AWS Secrets Manager at present. Support for other backends will be added in future releases. + diff --git a/example-config/dev/cluster1/ibm-db2u.yaml b/example-config/dev/cluster1/ibm-db2u.yaml new file mode 100644 index 000000000..828206240 --- /dev/null +++ b/example-config/dev/cluster1/ibm-db2u.yaml @@ -0,0 +1,8 @@ +merge-key: "dev/cluster1" + +ibm_db2u: + db2_namespace: db2u + + db2_channel: + + ibm_entitlement_key: "" diff --git a/example-config/dev/cluster1/ibm-dro.yaml b/example-config/dev/cluster1/ibm-dro.yaml new file mode 100644 index 000000000..508fb5963 --- /dev/null +++ b/example-config/dev/cluster1/ibm-dro.yaml @@ -0,0 +1,6 @@ +merge-key: "dev/cluster1" + +ibm_dro: + dro_namespace: redhat-marketplace + ibm_entitlement_key: "" + run_sync_hooks: true diff --git a/example-config/dev/cluster1/ibm-mas-cluster-base.yaml b/example-config/dev/cluster1/ibm-mas-cluster-base.yaml new file mode 100644 index 000000000..84a806166 --- /dev/null +++ b/example-config/dev/cluster1/ibm-mas-cluster-base.yaml @@ -0,0 +1,18 @@ +merge-key: "dev/cluster1" + +account: + id: dev + +region: + id: us-east-1 + +cluster: + id: cluster1 + url: https://api.cluster1.cakv.p3.openshiftapps.com:443 + +sm: + aws_access_key_id: "" + aws_secret_access_key: "" + +notifications: + slack_channel_id: devcluster1slack diff --git a/example-config/dev/cluster1/ibm-operator-catalog.yaml b/example-config/dev/cluster1/ibm-operator-catalog.yaml new file mode 100644 index 000000000..c478828aa --- /dev/null +++ 
b/example-config/dev/cluster1/ibm-operator-catalog.yaml @@ -0,0 +1,6 @@ +merge-key: "dev/cluster1" + +ibm_operator_catalog: + mas_catalog_version: v8-240430-amd64 + mas_catalog_image: icr.io/cpopen/ibm-maximo-operator-catalog + ibm_entitlement_key: "" diff --git a/example-config/dev/cluster1/instance1/ibm-db2u-databases.yaml b/example-config/dev/cluster1/instance1/ibm-db2u-databases.yaml new file mode 100644 index 000000000..e03c8ae20 --- /dev/null +++ b/example-config/dev/cluster1/instance1/ibm-db2u-databases.yaml @@ -0,0 +1,96 @@ +merge-key: "dev/cluster1/instance1" +ibm_db2u_databases: + - db2_namespace: db2u + db2_instance_name: db2wh-instance1-manage + db2_dbname: BLUDB + db2_version: s11.5.9.0-cn1 + db2_table_org: ROW + db2_node_label: + db2_dedicated_node: + db2_instance_registry: + DB2_CDE_REDUCED_LOGGING: 'REDUCED_REDO:NO' + DB2_OBJECT_STORAGE_LOCAL_STAGING_PATH: '/mnt/backup/staging' + DB2_BCKP_PAGE_VERIFICATION: 'TRUE' + DB2_WORKLOAD: 'MAXIMO' + DB2_SKIPINSERTED: 'ON' + DB2_INLIST_TO_NLJN: 'YES' + DB2_MINIMIZE_LISTPREFETCH: 'Y' + DB2_EVALUNCOMMITTED: 'YES' + DB2_SKIPDELETED: 'ON' + DB2_FMP_COMM_HEAPSZ: '65536' + DB2_USE_ALTERNATE_PAGE_CLEANING: 'ON' + DB2AUTH: 'OSAUTHDB,ALLOW_LOCAL_FALLBACK,PLUGIN_AUTO_RELOAD' + DB2_4K_DEVICE_SUPPORT: 'ON' + DB2_FMP_RUN_AS_CONNECTED_USER: 'NO' + db2_database_db_config: + CHNGPGS_THRESH: '40' + DFT_QUERYOPT: '5' + LOGBUFSZ: '1024' + LOCKLIST: 'AUTOMATIC' + MAXFILOP: '61440' + NUM_IOCLEANERS: 'AUTOMATIC' + NUM_IOSERVERS: 'AUTOMATIC' + STMTHEAP: '20000' + CUR_COMMIT: 'ON' + AUTO_REVAL: 'DEFERRED' + DEC_TO_CHAR_FMT: 'NEW' + DATABASE_MEMORY: 'AUTOMATIC' + PCKCACHESZ: 'AUTOMATIC' + DBHEAP: 'AUTOMATIC' + STAT_HEAP_SZ: 'AUTOMATIC' + SOFTMAX: '0' + CATALOGCACHE_SZ: '800' + LOCKTIMEOUT: '300' + LOGPRIMARY: '100' + LOGSECOND: '156' + LOGFILSIZ: '32768' + LOGARCHMETH1: 'DISK:/mnt/bludata0/db2/archive_log/' + MIRRORLOGPATH: '/mnt/backup/MIRRORLOGPATH' + STMT_CONC: 'LITERALS' + DDL_CONSTRAINT_DEF: 'YES' + TRACKMOD: 'YES' + 
AUTO_DEL_REC_OBJ: 'ON' + REC_HIS_RETENTN: '60' + NUM_DB_BACKUPS: '60' + DFT_TABLE_ORG: 'ROW' + AUTO_MAINT: 'ON' + AUTO_TBL_MAINT: 'ON' + AUTO_RUNSTATS: 'ON' + AUTO_REORG: 'OFF' + AUTO_DB_BACKUP: 'OFF' + WLM_ADMISSION_CTRL: 'NO' + SHEAPTHRES_SHR: 'automatic' + SORTHEAP: 'automatic' + AUTHN_CACHE_USERS: '100' + AUTHN_CACHE_DURATION: '10' + APPLHEAPSZ: '8192 AUTOMATIC' + db2_instance_dbm_config: + AGENT_STACK_SZ: '1024' + RQRIOBLK: '65535' + HEALTH_MON: 'OFF' + MON_HEAP_SZ: 'AUTOMATIC' + KEEPFENCED: 'NO' + FENCED_POOL: '50' + db2_mln_count: 1 + db2_num_pods: 1 + db2_meta_storage_class: efs-mas-4 + db2_meta_storage_size: 20Gi + db2_meta_storage_accessmode: ReadWriteMany + db2_data_storage_class: efs-mas-4 + db2_data_storage_size: 100Gi + db2_data_storage_accessmode: ReadWriteOnce + db2_backup_storage_class: efs-mas-4 + db2_backup_storage_size: 100Gi + db2_backup_storage_accessmode: ReadWriteMany + db2_logs_storage_class: efs-mas-4 + db2_logs_storage_size: 100Gi + db2_logs_storage_accessmode: ReadWriteOnce + db2_temp_storage_class: efs-mas-4 + db2_temp_storage_size: 100Gi + db2_temp_storage_accessmode: ReadWriteOnce + db2_cpu_requests: 4 + db2_cpu_limits: 6 + db2_memory_requests: 8Gi + db2_memory_limits: 16Gi + cluster_domain: + mas_application_id: manage diff --git a/example-config/dev/cluster1/instance1/ibm-mas-instance-base.yaml b/example-config/dev/cluster1/instance1/ibm-mas-instance-base.yaml new file mode 100644 index 000000000..86d09dd2a --- /dev/null +++ b/example-config/dev/cluster1/instance1/ibm-mas-instance-base.yaml @@ -0,0 +1,18 @@ +merge-key: "dev/cluster1/instance1" + +account: + id: dev + +region: + id: us-east-1 + +cluster: + id: cluster1 + url: https://api.cluster1.cakv.p3.openshiftapps.com:443 + +instance: + id: instance1 + +sm: + aws_access_key_id: "" + aws_secret_access_key: "" diff --git a/example-config/dev/cluster1/instance1/ibm-mas-masapp-configs.yaml b/example-config/dev/cluster1/instance1/ibm-mas-masapp-configs.yaml new file mode 100644 index 
000000000..10391fda3 --- /dev/null +++ b/example-config/dev/cluster1/instance1/ibm-mas-masapp-configs.yaml @@ -0,0 +1,105 @@ +merge-key: "dev/cluster1/instance1" +ibm_mas_masapp_configs: + - mas_app_id: manage + mas_app_namespace: mas-instance1-manage + mas_app_ws_apiversion: apps.mas.ibm.com/v1 + mas_app_ws_kind: ManageWorkspace + mas_workspace_id: demo2ws + mas_app_server_bundles_combined_add_server_config: + demo2ws-manage-d--sb0--asc--sn: PD94bWwgdmVyc2lvbj0xLjAgZW5jb2Rpbmc9VVRGLTg/Pgo8c2VydmVyIGRlc2NyaXB0aW9uPSJuZXcgc2VydmVyIGRlbW8yd3MtbWFuYWdlLWQtLXNiMC0tYXNjLS1zbiI+CjxmZWF0dXJlTWFuYWdlcj4KPGZlYXR1cmU+am5kaS0xLjA8L2ZlYXR1cmU+CjxmZWF0dXJlPndhc0ptc0NsaWVudC0yLjA8L2ZlYXR1cmU+CjxmZWF0dXJlPmptc01kYi0zLjI8L2ZlYXR1cmU+CjxmZWF0dXJlPm1kYi0zLjI8L2ZlYXR1cmU+CjwvZmVhdHVyZU1hbmFnZXI+CiAgICA8am1zUXVldWVDb25uZWN0aW9uRmFjdG9yeSBqbmRpTmFtZT0iam1zL21heGltby9pbnQvY2YvaW50Y2YiIGNvbm5lY3Rpb25NYW5hZ2VyUmVmPSJtaWZqbXNjb25mYWN0Ij48cHJvcGVydGllcy53YXNKbXMgcmVtb3RlU2VydmVyQWRkcmVzcz0idXNlYXN0MWEtZGVtbzJ3cy1qbXMubWFzLXVzZWFzdDFhLW1hbmFnZS5zdmM6NzI3NjpCb290c3RyYXBCYXNpY01lc3NhZ2luZyIvPjwvam1zUXVldWVDb25uZWN0aW9uRmFjdG9yeT4KICAgIDxjb25uZWN0aW9uTWFuYWdlciBpZD0ibWlmam1zY29uZmFjdCIgbWF4UG9vbFNpemU9IjIwIi8+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9zcW91dCI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0ic3FvdXRiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9zcWluIj48cHJvcGVydGllcy53YXNKbXMgcXVldWVOYW1lPSJzcWluYmQiLz48L2ptc1F1ZXVlPgogICAgPGptc1F1ZXVlIGpuZGlOYW1lPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvY3FpbiI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FpbmJkIi8+PC9qbXNRdWV1ZT4KICAgIDxqbXNRdWV1ZSBqbmRpTmFtZT0iam1zL21heGltby9pbnQvcXVldWVzL2NxaW5lcnIiPjxwcm9wZXJ0aWVzLndhc0ptcyBxdWV1ZU5hbWU9ImNxaW5lcnJiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dCI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FvdXRiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dGVyciI+PHByb3BlcnRpZXMud2FzSm1zI
HF1ZXVlTmFtZT0iY3FvdXRlcnJiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9ub3RmIj48cHJvcGVydGllcy53YXNKbXMgcXVldWVOYW1lPSJub3RmYmQiLz48L2ptc1F1ZXVlPgogICAgPGptc1F1ZXVlIGpuZGlOYW1lPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvbm90ZmVyciI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0ibm90ZmVycmJkIi8+PC9qbXNRdWV1ZT4KPC9zZXJ2ZXI+Cg== + demo2ws-manage-d--sb1--asc--sn: PD94bWwgdmVyc2lvbj0xLjAgZW5jb2Rpbmc9VVRGLTg/Pgo8c2VydmVyIGRlc2NyaXB0aW9uPSJuZXcgc2VydmVyIGRlbW8yd3MtbWFuYWdlLWQtLXNiMS0tYXNjLS1zbiI+CjxmZWF0dXJlTWFuYWdlcj4KPGZlYXR1cmU+am5kaS0xLjA8L2ZlYXR1cmU+CjxmZWF0dXJlPndhc0ptc0NsaWVudC0yLjA8L2ZlYXR1cmU+CjxmZWF0dXJlPmptc01kYi0zLjI8L2ZlYXR1cmU+CjxmZWF0dXJlPm1kYi0zLjI8L2ZlYXR1cmU+CjwvZmVhdHVyZU1hbmFnZXI+CiAgICA8am1zUXVldWVDb25uZWN0aW9uRmFjdG9yeSBqbmRpTmFtZT0iam1zL21heGltby9pbnQvY2YvaW50Y2YiIGNvbm5lY3Rpb25NYW5hZ2VyUmVmPSJtaWZqbXNjb25mYWN0Ij48cHJvcGVydGllcy53YXNKbXMgcmVtb3RlU2VydmVyQWRkcmVzcz0idXNlYXN0MWEtZGVtbzJ3cy1qbXMubWFzLXVzZWFzdDFhLW1hbmFnZS5zdmM6NzI3NjpCb290c3RyYXBCYXNpY01lc3NhZ2luZyIvPjwvam1zUXVldWVDb25uZWN0aW9uRmFjdG9yeT4KICAgIDxjb25uZWN0aW9uTWFuYWdlciBpZD0ibWlmam1zY29uZmFjdCIgbWF4UG9vbFNpemU9IjIwIi8+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9zcW91dCI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0ic3FvdXRiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9zcWluIj48cHJvcGVydGllcy53YXNKbXMgcXVldWVOYW1lPSJzcWluYmQiLz48L2ptc1F1ZXVlPgogICAgPGptc1F1ZXVlIGpuZGlOYW1lPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvY3FpbiI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FpbmJkIi8+PC9qbXNRdWV1ZT4KICAgIDxqbXNRdWV1ZSBqbmRpTmFtZT0iam1zL21heGltby9pbnQvcXVldWVzL2NxaW5lcnIiPjxwcm9wZXJ0aWVzLndhc0ptcyBxdWV1ZU5hbWU9ImNxaW5lcnJiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dCI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FvdXRiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dGVyciI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FvdXRlcnJiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldW
Ugam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9ub3RmIj48cHJvcGVydGllcy53YXNKbXMgcXVldWVOYW1lPSJub3RmYmQiLz48L2ptc1F1ZXVlPgogICAgPGptc1F1ZXVlIGpuZGlOYW1lPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvbm90ZmVyciI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0ibm90ZmVycmJkIi8+PC9qbXNRdWV1ZT4KICAgIDxqbXNBY3RpdmF0aW9uU3BlYyBpZD0ibWF4aW1vbWVhL21ib2VqYi9KTVNDb250UXVldWVQcm9jZXNzb3ItMSIgbWF4RW5kcG9pbnRzPSI1Ij48cHJvcGVydGllcy53YXNKbXMgZGVzdGluYXRpb25Mb29rdXA9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcWluIiBtYXhDb25jdXJyZW5jeT0iNSIgbWF4QmF0Y2hTaXplPSIyMCIgY29ubmVjdGlvbkZhY3RvcnlMb29rdXA9Imptcy9tYXhpbW8vaW50L2NmL2ludGNmIi8+PC9qbXNBY3RpdmF0aW9uU3BlYz4KICAgIDxqbXNBY3RpdmF0aW9uU3BlYyBpZD0ibWF4aW1vbWVhL21ib2VqYi9KTVNDb250UXVldWVQcm9jZXNzb3ItMiIgbWF4RW5kcG9pbnRzPSIxIj48cHJvcGVydGllcy53YXNKbXMgZGVzdGluYXRpb25Mb29rdXA9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcWluZXJyIiBtYXhDb25jdXJyZW5jeT0iMSIgbWF4QmF0Y2hTaXplPSIyMCIgY29ubmVjdGlvbkZhY3RvcnlMb29rdXA9Imptcy9tYXhpbW8vaW50L2NmL2ludGNmIi8+PC9qbXNBY3RpdmF0aW9uU3BlYz4KICAgIDxqbXNBY3RpdmF0aW9uU3BlYyBpZD0ibWF4aW1vbWVhL21ib2VqYi9KTVNDb250T3V0UXVldWVQcm9jZXNzb3ItMSIgbWF4RW5kcG9pbnRzPSI1Ij48cHJvcGVydGllcy53YXNKbXMgZGVzdGluYXRpb25Mb29rdXA9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dCIgbWF4Q29uY3VycmVuY3k9IjUiIG1heEJhdGNoU2l6ZT0iMjAiIGNvbm5lY3Rpb25GYWN0b3J5TG9va3VwPSJqbXMvbWF4aW1vL2ludC9jZi9pbnRjZiIvPjwvam1zQWN0aXZhdGlvblNwZWM+CiAgICA8am1zQWN0aXZhdGlvblNwZWMgaWQ9Im1heGltb21lYS9tYm9lamIvSk1TQ29udE91dFF1ZXVlUHJvY2Vzc29yLTIiIG1heEVuZHBvaW50cz0iMSI+PHByb3BlcnRpZXMud2FzSm1zIGRlc3RpbmF0aW9uTG9va3VwPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvY3FvdXRlcnIiIG1heENvbmN1cnJlbmN5PSIxIiBtYXhCYXRjaFNpemU9IjIwIiBjb25uZWN0aW9uRmFjdG9yeUxvb2t1cD0iam1zL21heGltby9pbnQvY2YvaW50Y2YiLz48L2ptc0FjdGl2YXRpb25TcGVjPgo8L3NlcnZlcj4K + demo2ws-manage-d--sb2--asc--sn: 
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHNlcnZlciBkZXNjcmlwdGlvbj0ibmV3IHNlcnZlciB1c2Vhc3QxYS1tYW5hZ2UtZC0tc2IyLS1hc2MtLXNuIj4KCiAgPCEtLSBFbmFibGUgZmVhdHVyZXMgLS0+CjxmZWF0dXJlTWFuYWdlcj4KICA8ZmVhdHVyZT53YXNKbXNTZWN1cml0eS0xLjA8L2ZlYXR1cmU+CiAgPGZlYXR1cmU+d2FzSm1zU2VydmVyLTEuMDwvZmVhdHVyZT4KICA8L2ZlYXR1cmVNYW5hZ2VyPgogIDxhcHBsaWNhdGlvbk1hbmFnZXIgYXV0b0V4cGFuZD0idHJ1ZSIvPgogIDx3YXNKbXNFbmRwb2ludCBob3N0PSIqIiB3YXNKbXNTU0xQb3J0PSI3Mjg2IiB3YXNKbXNQb3J0PSI3Mjc2IiAvPgogIDxtZXNzYWdpbmdFbmdpbmU+CiAgPGZpbGVTdG9yZSBwYXRoPSJqbXMvam1zc3RvcmUiLz4KICA8cXVldWUgaWQ9InNxb3V0YmQiIG1haW50YWluU3RyaWN0T3JkZXI9InRydWUiIG1heE1lc3NhZ2VEZXB0aD0iMTAwMDAwIiBmYWlsZWREZWxpdmVyeVBvbGljeT0iS0VFUF9UUllJTkciIG1heFJlZGVsaXZlcnlDb3VudD0iLTEiLz4KICA8cXVldWUgaWQ9InNxaW5iZCIgbWFpbnRhaW5TdHJpY3RPcmRlcj0idHJ1ZSIgbWF4TWVzc2FnZURlcHRoPSIyMDAwMDAiIGZhaWxlZERlbGl2ZXJ5UG9saWN5PSJLRUVQX1RSWUlORyIgbWF4UmVkZWxpdmVyeUNvdW50PSItMSIvPgogIDxxdWV1ZSBpZD0iY3FpbmVycmJkIiBtYXhNZXNzYWdlRGVwdGg9IjEwMDAwMCIgZmFpbGVkRGVsaXZlcnlQb2xpY3k9IktFRVBfVFJZSU5HIi8+CiAgPHF1ZXVlIGlkPSJjcWluYmQiIG1heE1lc3NhZ2VEZXB0aD0iMTAwMDAwIiBleGNlcHRpb25EZXN0aW5hdGlvbj0iY3FpbmVycmJkIi8+CiAgPHF1ZXVlIGlkPSJjcW91dGVycmJkIiBtYXhNZXNzYWdlRGVwdGg9IjEwMDAwMCIgZmFpbGVkRGVsaXZlcnlQb2xpY3k9IktFRVBfVFJZSU5HIi8+CiAgPHF1ZXVlIGlkPSJjcW91dGJkIiBtYXhNZXNzYWdlRGVwdGg9IjEwMDAwMCIgZXhjZXB0aW9uRGVzdGluYXRpb249ImNxb3V0ZXJyYmQiLz4KICA8cXVldWUgaWQ9Im5vdGZlcnJiZCIgbWF4TWVzc2FnZURlcHRoPSIxMDAwMDAiIGZhaWxlZERlbGl2ZXJ5UG9saWN5PSJLRUVQX1RSWUlORyIvPgogIDxxdWV1ZSBpZD0ibm90ZmJkIiBtYXhNZXNzYWdlRGVwdGg9IjEwMDAwMCIgZXhjZXB0aW9uRGVzdGluYXRpb249Im5vdGZlcnJiZCIvPgogIDwvbWVzc2FnaW5nRW5naW5lPgo8L3NlcnZlcj4K + demo2ws-manage-d--sb3--asc--sn: 
PD94bWwgdmVyc2lvbj0xLjAgZW5jb2Rpbmc9VVRGLTg/Pgo8c2VydmVyIGRlc2NyaXB0aW9uPSJuZXcgc2VydmVyIGRlbW8yd3MtbWFuYWdlLWQtLXNiMy0tYXNjLS1zbiI+CjxmZWF0dXJlTWFuYWdlcj4KPGZlYXR1cmU+am5kaS0xLjA8L2ZlYXR1cmU+CjxmZWF0dXJlPndhc0ptc0NsaWVudC0yLjA8L2ZlYXR1cmU+CjxmZWF0dXJlPmptc01kYi0zLjI8L2ZlYXR1cmU+CjxmZWF0dXJlPm1kYi0zLjI8L2ZlYXR1cmU+CjwvZmVhdHVyZU1hbmFnZXI+CiAgICA8am1zUXVldWVDb25uZWN0aW9uRmFjdG9yeSBqbmRpTmFtZT0iam1zL21heGltby9pbnQvY2YvaW50Y2YiIGNvbm5lY3Rpb25NYW5hZ2VyUmVmPSJtaWZqbXNjb25mYWN0Ij48cHJvcGVydGllcy53YXNKbXMgcmVtb3RlU2VydmVyQWRkcmVzcz0idXNlYXN0MWEtZGVtbzJ3cy1qbXMubWFzLXVzZWFzdDFhLW1hbmFnZS5zdmM6NzI3NjpCb290c3RyYXBCYXNpY01lc3NhZ2luZyIvPjwvam1zUXVldWVDb25uZWN0aW9uRmFjdG9yeT4KICAgIDxjb25uZWN0aW9uTWFuYWdlciBpZD0ibWlmam1zY29uZmFjdCIgbWF4UG9vbFNpemU9IjIwIi8+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9zcW91dCI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0ic3FvdXRiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9zcWluIj48cHJvcGVydGllcy53YXNKbXMgcXVldWVOYW1lPSJzcWluYmQiLz48L2ptc1F1ZXVlPgogICAgPGptc1F1ZXVlIGpuZGlOYW1lPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvY3FpbiI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FpbmJkIi8+PC9qbXNRdWV1ZT4KICAgIDxqbXNRdWV1ZSBqbmRpTmFtZT0iam1zL21heGltby9pbnQvcXVldWVzL2NxaW5lcnIiPjxwcm9wZXJ0aWVzLndhc0ptcyBxdWV1ZU5hbWU9ImNxaW5lcnJiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dCI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FvdXRiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dGVyciI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0iY3FvdXRlcnJiZCIvPjwvam1zUXVldWU+CiAgICA8am1zUXVldWUgam5kaU5hbWU9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9ub3RmIj48cHJvcGVydGllcy53YXNKbXMgcXVldWVOYW1lPSJub3RmYmQiLz48L2ptc1F1ZXVlPgogICAgPGptc1F1ZXVlIGpuZGlOYW1lPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvbm90ZmVyciI+PHByb3BlcnRpZXMud2FzSm1zIHF1ZXVlTmFtZT0ibm90ZmVycmJkIi8+PC9qbXNRdWV1ZT4KICAgIDxqbXNBY3RpdmF0aW9uU3BlYyBpZD0ibWF4aW1vbWVhL21ib2VqYi9KTVNDb250UXVldWVQcm9jZXNzb3ItMSIgbWF4RW5kcG9pbnRzPSI1Ij48
cHJvcGVydGllcy53YXNKbXMgZGVzdGluYXRpb25Mb29rdXA9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcWluIiBtYXhDb25jdXJyZW5jeT0iNSIgbWF4QmF0Y2hTaXplPSIyMCIgY29ubmVjdGlvbkZhY3RvcnlMb29rdXA9Imptcy9tYXhpbW8vaW50L2NmL2ludGNmIi8+PC9qbXNBY3RpdmF0aW9uU3BlYz4KICAgIDxqbXNBY3RpdmF0aW9uU3BlYyBpZD0ibWF4aW1vbWVhL21ib2VqYi9KTVNDb250UXVldWVQcm9jZXNzb3ItMiIgbWF4RW5kcG9pbnRzPSIxIj48cHJvcGVydGllcy53YXNKbXMgZGVzdGluYXRpb25Mb29rdXA9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcWluZXJyIiBtYXhDb25jdXJyZW5jeT0iMSIgbWF4QmF0Y2hTaXplPSIyMCIgY29ubmVjdGlvbkZhY3RvcnlMb29rdXA9Imptcy9tYXhpbW8vaW50L2NmL2ludGNmIi8+PC9qbXNBY3RpdmF0aW9uU3BlYz4KICAgIDxqbXNBY3RpdmF0aW9uU3BlYyBpZD0ibWF4aW1vbWVhL21ib2VqYi9KTVNDb250T3V0UXVldWVQcm9jZXNzb3ItMSIgbWF4RW5kcG9pbnRzPSI1Ij48cHJvcGVydGllcy53YXNKbXMgZGVzdGluYXRpb25Mb29rdXA9Imptcy9tYXhpbW8vaW50L3F1ZXVlcy9jcW91dCIgbWF4Q29uY3VycmVuY3k9IjUiIG1heEJhdGNoU2l6ZT0iMjAiIGNvbm5lY3Rpb25GYWN0b3J5TG9va3VwPSJqbXMvbWF4aW1vL2ludC9jZi9pbnRjZiIvPjwvam1zQWN0aXZhdGlvblNwZWM+CiAgICA8am1zQWN0aXZhdGlvblNwZWMgaWQ9Im1heGltb21lYS9tYm9lamIvSk1TQ29udE91dFF1ZXVlUHJvY2Vzc29yLTIiIG1heEVuZHBvaW50cz0iMSI+PHByb3BlcnRpZXMud2FzSm1zIGRlc3RpbmF0aW9uTG9va3VwPSJqbXMvbWF4aW1vL2ludC9xdWV1ZXMvY3FvdXRlcnIiIG1heENvbmN1cnJlbmN5PSIxIiBtYXhCYXRjaFNpemU9IjIwIiBjb25uZWN0aW9uRmFjdG9yeUxvb2t1cD0iam1zL21heGltby9pbnQvY2YvaW50Y2YiLz48L2ptc0FjdGl2YXRpb25TcGVjPgo8L3NlcnZlcj4K + demo2ws-manage-d--sb4--asc--sn: 
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHNlcnZlciBkZXNjcmlwdGlvbj0ibmV3IHNlcnZlciBkZW1vMndzLW1hbmFnZS1kLS1zYjQtLWFzYy0tc24iPgoKICA8IS0tIEVuYWJsZSBmZWF0dXJlcyAtLT4KPGZlYXR1cmVNYW5hZ2VyPgogIDxmZWF0dXJlPndhc0ptc1NlY3VyaXR5LTEuMDwvZmVhdHVyZT4KICA8ZmVhdHVyZT53YXNKbXNTZXJ2ZXItMS4wPC9mZWF0dXJlPgogIDwvZmVhdHVyZU1hbmFnZXI+CiAgPGFwcGxpY2F0aW9uTWFuYWdlciBhdXRvRXhwYW5kPSJ0cnVlIi8+CiAgPHdhc0ptc0VuZHBvaW50IGhvc3Q9IioiIHdhc0ptc1NTTFBvcnQ9IjcyODYiIHdhc0ptc1BvcnQ9IjcyNzYiIC8+CiAgPG1lc3NhZ2luZ0VuZ2luZT4KICA8ZmlsZVN0b3JlIHBhdGg9Imptcy9qbXNzdG9yZSIvPgogIDxxdWV1ZSBpZD0ic3FvdXRiZCIgbWFpbnRhaW5TdHJpY3RPcmRlcj0idHJ1ZSIgbWF4TWVzc2FnZURlcHRoPSIxMDAwMDAiIGZhaWxlZERlbGl2ZXJ5UG9saWN5PSJLRUVQX1RSWUlORyIgbWF4UmVkZWxpdmVyeUNvdW50PSItMSIvPgogIDxxdWV1ZSBpZD0ic3FpbmJkIiBtYWludGFpblN0cmljdE9yZGVyPSJ0cnVlIiBtYXhNZXNzYWdlRGVwdGg9IjIwMDAwMCIgZmFpbGVkRGVsaXZlcnlQb2xpY3k9IktFRVBfVFJZSU5HIiBtYXhSZWRlbGl2ZXJ5Q291bnQ9Ii0xIi8+CiAgPHF1ZXVlIGlkPSJjcWluZXJyYmQiIG1heE1lc3NhZ2VEZXB0aD0iMTAwMDAwIiBmYWlsZWREZWxpdmVyeVBvbGljeT0iS0VFUF9UUllJTkciLz4KICA8cXVldWUgaWQ9ImNxaW5iZCIgbWF4TWVzc2FnZURlcHRoPSIxMDAwMDAiIGV4Y2VwdGlvbkRlc3RpbmF0aW9uPSJjcWluZXJyYmQiLz4KICA8cXVldWUgaWQ9ImNxb3V0ZXJyYmQiIG1heE1lc3NhZ2VEZXB0aD0iMTAwMDAwIiBmYWlsZWREZWxpdmVyeVBvbGljeT0iS0VFUF9UUllJTkciLz4KICA8cXVldWUgaWQ9ImNxb3V0YmQiIG1heE1lc3NhZ2VEZXB0aD0iMTAwMDAwIiBleGNlcHRpb25EZXN0aW5hdGlvbj0iY3FvdXRlcnJiZCIvPgogIDxxdWV1ZSBpZD0ibm90ZmVycmJkIiBtYXhNZXNzYWdlRGVwdGg9IjEwMDAwMCIgZmFpbGVkRGVsaXZlcnlQb2xpY3k9IktFRVBfVFJZSU5HIi8+CiAgPHF1ZXVlIGlkPSJub3RmYmQiIG1heE1lc3NhZ2VEZXB0aD0iMTAwMDAwIiBleGNlcHRpb25EZXN0aW5hdGlvbj0ibm90ZmVycmJkIi8+CiAgPC9tZXNzYWdpbmdFbmdpbmU+Cjwvc2VydmVyPgo= + customization_archive_secret_names: + mas_appws_spec: + bindings: + jdbc: workspace-application + components: + base: + version: latest + settings: + aio: + install: true + db: + dbSchema: maximo + maxinst: + bypassUpgradeVersionCheck: false + db2Vargraphic: true + demodata: false + indexSpace: MAXINDEX + tableSpace: MAXDATA + deployment: + buildTag: latest + 
defaultJMS: true + mode: up + persistentVolumes: + - accessModes: + - ReadWriteMany + mountPath: /DOCLINKS + pvcName: manage-doclinks + size: 20Gi + storageClassName: efs-instance1 + - accessModes: + - ReadWriteMany + mountPath: /bim + pvcName: manage-bim + size: 20Gi + storageClassName: efs-instance1 + - accessModes: + - ReadWriteMany + mountPath: /jms + pvcName: manage-jms + size: 20Gi + storageClassName: efs-instance1 + serverBundles: + - additionalServerConfig: + secretName: demo2ws-manage-d--sb0--asc--sn + bundleType: ui + isDefault: true + isMobileTarget: true + isUserSyncTarget: false + name: ui + replica: 1 + routeSubDomain: ui + - additionalServerConfig: + secretName: demo2ws-manage-d--sb1--asc--sn + bundleType: mea + isDefault: false + isMobileTarget: false + isUserSyncTarget: true + name: mea + replica: 1 + routeSubDomain: mea + - additionalServerConfig: + secretName: demo2ws-manage-d--sb2--asc--sn + bundleType: report + isDefault: false + isMobileTarget: false + isUserSyncTarget: false + name: rpt + replica: 1 + routeSubDomain: rpt + - additionalServerConfig: + secretName: demo2ws-manage-d--sb3--asc--sn + bundleType: cron + isDefault: false + isMobileTarget: false + isUserSyncTarget: false + name: cron + replica: 1 + routeSubDomain: cron + - additionalServerConfig: + secretName: demo2ws-manage-d--sb4--asc--sn + bundleType: standalonejms + isDefault: false + isMobileTarget: false + isUserSyncTarget: false + name: jms + replica: 1 + routeSubDomain: jms + serverTimezone: GMT + languages: + baseLang: EN + secondaryLangs: [] + mas_manual_cert_mgmt: diff --git a/example-config/dev/cluster1/instance1/ibm-mas-masapp-manage-install.yaml b/example-config/dev/cluster1/instance1/ibm-mas-masapp-manage-install.yaml new file mode 100644 index 000000000..7cb0a905b --- /dev/null +++ b/example-config/dev/cluster1/instance1/ibm-mas-masapp-manage-install.yaml @@ -0,0 +1,23 @@ +merge-key: "dev/cluster1/instance1" + +ibm_suite_app_manage_install: + ibm_entitlement_key: + 
mas_instance_id: instance1 + mas_app_id: manage + mas_edition: essentials-maintenance + mas_app_namespace: mas-instance1-manage + mas_app_channel: 8.7.x + mas_app_catalog_source: ibm-operator-catalog + mas_app_api_version: apps.mas.ibm.com/v1 + mas_app_kind: ManageApp + run_sync_hooks: true + mas_app_spec: {} + + mas_manual_cert_mgmt: + + + + + + + diff --git a/example-config/dev/cluster1/instance1/ibm-mas-suite-configs.yaml b/example-config/dev/cluster1/instance1/ibm-mas-suite-configs.yaml new file mode 100644 index 000000000..85a2dce6c --- /dev/null +++ b/example-config/dev/cluster1/instance1/ibm-mas-suite-configs.yaml @@ -0,0 +1,151 @@ +merge-key: "dev/cluster1/instance1" +ibm_mas_suite_configs: + - mas_config_name: "instance1-bas-system" + mas_config_chart: ibm-mas-bas-config + mas_config_scope: system + mas_workspace_id: + mas_application_id: + mas_config_kind: "bascfgs" + mas_config_api_version: "config.mas.ibm.com" + use_postdelete_hooks: true + dro_api_token: + dro_endpoint_url: + mas_segment_key: + dro_contact: + email: email.com + first_name: joe + last_name: bloggs + dro_ca: + crt: | + -----BEGIN CERTIFICATE----- + MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw + TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh + cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 + WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu + ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY + MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc + h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ + 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U + A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW + T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH + B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC + B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv + 
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn + OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn + jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw + qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI + rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV + HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq + hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL + ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ + 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK + NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 + ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur + TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC + jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc + oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq + 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA + mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d + emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= + -----END CERTIFICATE----- + - mas_config_name: "instance1-jdbc-wsapp-demo2ws-manage" + mas_config_chart: ibm-db2u-jdbc-config + mas_config_scope: wsapp + mas_workspace_id: demo2ws + mas_application_id: manage + mas_config_kind: "jdbccfgs" + mas_config_api_version: "config.mas.ibm.com" + use_postdelete_hooks: true + db2_instance_name: db2wh-instance1-manage + db2_jdbc_username: + jdbc_instance_password: + mas_config_dir: + db2_tls_serviceport: + tls_version: TLSv1.2 + db2_dbname: BLUDB + db2_namespace: db2u + app_suite_jdbccfg_labels: + mas.ibm.com/applicationId: "manage" + mas.ibm.com/configScope: application + mas.ibm.com/instanceId: "instance1" + system_suite_jdbccfg_labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "instance1" + ws_suite_jdbccfg_labels: + mas.ibm.com/configScope: workspace-application + mas.ibm.com/instanceId: 
"instance1" + mas.ibm.com/workspaceId: "demo2ws" + wsapp_suite_jdbccfg_labels: + mas.ibm.com/applicationId: "manage" + mas.ibm.com/configScope: workspace-application + mas.ibm.com/instanceId: "instance1" + mas.ibm.com/workspaceId: "demo2ws" + db2_ca_pem: + crt: | + + - mas_config_name: "instance1-mongo-system" + mas_config_chart: ibm-mas-mongo-config + mas_config_scope: system + mas_workspace_id: + mas_application_id: + mas_config_kind: "mongocfgs" + mas_config_api_version: "config.mas.ibm.com" + use_postdelete_hooks: true + username: + password: + config: + hosts: + - host: instance1-mongohost.com + port: 27017 + configDb: admin + authMechanism: DEFAULT + retryWrites: false + credentials: + secretName: "system-mongo-credentials" + certificates: + - alias: ca + crt: | + -----BEGIN CERTIFICATE----- + MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw + TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh + cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 + WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu + ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY + MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc + h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ + 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U + A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW + T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH + B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC + B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv + KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn + OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn + jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw + qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI + rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV + 
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq + hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL + ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ + 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK + NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 + ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur + TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC + jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc + oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq + 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA + mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d + emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= + -----END CERTIFICATE----- + - mas_config_name: "instance1-sls-system" + mas_config_chart: ibm-mas-sls-config + mas_config_scope: system + mas_workspace_id: + mas_application_id: + mas_config_kind: "slscfgs" + mas_config_api_version: "config.mas.ibm.com" + use_postdelete_hooks: true + registration_key: + url: "https://sls.mas-instance1-sls.svc" + ca: + crt: | + diff --git a/example-config/dev/cluster1/instance1/ibm-mas-suite.yaml b/example-config/dev/cluster1/instance1/ibm-mas-suite.yaml new file mode 100644 index 000000000..53892a39c --- /dev/null +++ b/example-config/dev/cluster1/instance1/ibm-mas-suite.yaml @@ -0,0 +1,12 @@ +merge-key: "dev/cluster1/instance1" + +ibm_mas_suite: + cert_manager_namespace: "cert-manager" + ibm_entitlement_key: "" + domain: "useast1a.apps.rosa.mas-4.cakv.p3.openshiftapps.com" + + mas_channel: "8.11.x" + icr_cp: "cp.icr.io/cp" + icr_cp_open: "icr.io/cpopen" + + mas_manual_cert_mgmt: "False" diff --git a/example-config/dev/cluster1/instance1/ibm-mas-workspaces.yaml b/example-config/dev/cluster1/instance1/ibm-mas-workspaces.yaml new file mode 100644 index 000000000..d62021490 --- /dev/null +++ 
b/example-config/dev/cluster1/instance1/ibm-mas-workspaces.yaml @@ -0,0 +1,4 @@ +merge-key: "dev/cluster1/instance1" +ibm_mas_workspaces: + - mas_workspace_id: demo2ws + mas_workspace_name: demo2 workspace diff --git a/example-config/dev/cluster1/instance1/ibm-sls.yaml b/example-config/dev/cluster1/instance1/ibm-sls.yaml new file mode 100644 index 000000000..b2cefe8b5 --- /dev/null +++ b/example-config/dev/cluster1/instance1/ibm-sls.yaml @@ -0,0 +1,64 @@ +merge-key: "dev/cluster1/instance1" + +ibm_sls: + sls_channel: 3.x + sls_entitlement_file: + ibm_entitlement_key: + + # aws docdb + mongodb_provider: "aws" + user_action: "add" + docdb_host: "" + docdb_port: "" + docdb_master_username: "" + docdb_master_password: "" + docdb_master_info: "" + sls_mongo_username: "" + sls_mongo_password: "" + + sls_mongo_secret_name: sls-mongo-credentials + icr_cp_open: "icr.io/cpopen" + run_sync_hooks: true + mongo_spec: + authMechanism: DEFAULT + configDb: admin + secretName: sls-mongo-credentials + retryWrites: false + nodes: + hosts: + - host: instance1-mongohost.com + port: 27017 + certificates: + - alias: g1root + crt: | + -----BEGIN CERTIFICATE----- + MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw + TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh + cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 + WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu + ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY + MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc + h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ + 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U + A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW + T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH + B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC + B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv + 
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn + OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn + jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw + qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI + rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV + HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq + hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL + ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ + 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK + NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 + ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur + TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC + jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc + oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq + 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA + mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d + emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= + -----END CERTIFICATE----- diff --git a/example-config/dev/cluster1/redhat-cert-manager.yaml b/example-config/dev/cluster1/redhat-cert-manager.yaml new file mode 100644 index 000000000..cb95bdeef --- /dev/null +++ b/example-config/dev/cluster1/redhat-cert-manager.yaml @@ -0,0 +1,5 @@ +merge-key: "dev/cluster1" + +redhat_cert_manager: + run_sync_hooks: true + channel: stable-v1 diff --git a/instance-applications/000-ibm-sync-resources/Chart.yaml b/instance-applications/000-ibm-sync-resources/Chart.yaml new file mode 100644 index 000000000..a5fb115a4 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: ibm-sync-resources +description: | + Establishes resources necessary for running PostDelete hooks from Applications in later syncwaves. 
+ This ensures that these resources are not deleted before the PostDelete hooks get a chance to run. +type: application +version: 1.0.0 diff --git a/instance-applications/000-ibm-sync-resources/README.md b/instance-applications/000-ibm-sync-resources/README.md new file mode 100644 index 000000000..da0d4eaa0 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/README.md @@ -0,0 +1,9 @@ +IBM MAS Sync Resources +=============================================================================== +Instantiated by the /gitops/root-applications/ibm-mas-instance-root/templates/90-ibm-sync-resources.yaml root application. + +Various resources required to run Jobs contained in the 91-ibm-sync-jobs chart. +This application has a lower syncwave (90) than that of the 91-ibm-sync-jobs application responsible for running the jobs. +This is to ensure that these resources persist long enough for the PostDelete hooks in that 91-ibm-sync-jobs to complete, +while still being cleaned up successfully when the MAS instance is deprovisioned. 
+ diff --git a/instance-applications/000-ibm-sync-resources/templates/00-Namespace.yaml b/instance-applications/000-ibm-sync-resources/templates/00-Namespace.yaml new file mode 100644 index 000000000..922fecc55 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/00-Namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "00" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/instance-applications/000-ibm-sync-resources/templates/01-02-aws-docdb-user-job_ServiceAccount.yaml b/instance-applications/000-ibm-sync-resources/templates/01-02-aws-docdb-user-job_ServiceAccount.yaml new file mode 100644 index 000000000..a7d69d23b --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-02-aws-docdb-user-job_ServiceAccount.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: aws-docdb-user-job + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aws-docdb-user-job-role + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: [] +# No special K8S access required at present by these Jobs + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: aws-docdb-user-job-rolebinding + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "02" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | 
toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: aws-docdb-user-job + namespace: mas-{{ .Values.instance_id }}-syncres +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: aws-docdb-user-job-role diff --git a/instance-applications/000-ibm-sync-resources/templates/01-02-ibm-suite-certs-job_ServiceAccount.yaml b/instance-applications/000-ibm-sync-resources/templates/01-02-ibm-suite-certs-job_ServiceAccount.yaml new file mode 100644 index 000000000..eba3b7d51 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-02-ibm-suite-certs-job_ServiceAccount.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ibm-suite-certs-job + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-suite-certs-job-cluster-rolebinding-{{ .Values.instance_id }} + annotations: + argocd.argoproj.io/sync-wave: "02" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: ibm-suite-certs-job + namespace: mas-{{ .Values.instance_id }}-syncres +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/instance-applications/000-ibm-sync-resources/templates/01-02-ibm-suite-dns-job_ServiceAccount.yaml b/instance-applications/000-ibm-sync-resources/templates/01-02-ibm-suite-dns-job_ServiceAccount.yaml new file mode 100644 index 000000000..9da7dbb7e --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-02-ibm-suite-dns-job_ServiceAccount.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ibm-suite-dns-job + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + 
argocd.argoproj.io/sync-wave: "01" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-suite-dns-job-cluster-rolebinding-{{ .Values.instance_id }} + annotations: + argocd.argoproj.io/sync-wave: "02" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +subjects: + - kind: ServiceAccount + name: ibm-suite-dns-job + namespace: mas-{{ .Values.instance_id }}-syncres +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/instance-applications/000-ibm-sync-resources/templates/01-aws-docdb_Secret.yaml b/instance-applications/000-ibm-sync-resources/templates/01-aws-docdb_Secret.yaml new file mode 100644 index 000000000..73401f229 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-aws-docdb_Secret.yaml @@ -0,0 +1,21 @@ +{{- if not (empty .Values.docdb) }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: aws-docdb + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" + avp.kubernetes.io/remove-missing: "true" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +stringData: + docdb_master_username: {{ .Values.docdb.master_username }} + docdb_master_password: {{ .Values.docdb.master_password }} + docdb_master_info: {{ .Values.docdb.master_info }} + docdb_instance_password: {{ .Values.docdb.instance_password }} +type: Opaque +{{- end }} diff --git a/instance-applications/000-ibm-sync-resources/templates/01-aws_Secret.yaml b/instance-applications/000-ibm-sync-resources/templates/01-aws_Secret.yaml new file mode 100644 index 000000000..f8c754078 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-aws_Secret.yaml @@ -0,0 +1,17 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + 
name: aws + namespace: mas-{{ .Values.instance_id}}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +stringData: + aws_access_key_id: {{ .Values.sm_aws_access_key_id }} + aws_secret_access_key: {{ .Values.sm_aws_secret_access_key }} + aws_default_region: {{ .Values.sm_aws_region }} +type: Opaque diff --git a/instance-applications/000-ibm-sync-resources/templates/01-ibm-suite-certs_Secret.yaml b/instance-applications/000-ibm-sync-resources/templates/01-ibm-suite-certs_Secret.yaml new file mode 100644 index 000000000..9bd753496 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-ibm-suite-certs_Secret.yaml @@ -0,0 +1,23 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "True") }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-suite-certs + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" + avp.kubernetes.io/remove-missing: "true" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +stringData: + cis_apikey: {{ .Values.cis_apikey }} + manual_certs.yaml: | + manual_certs: +{{ .Values.manual_certs | toYaml | indent 6 }} +type: Opaque + +{{- end }} diff --git a/instance-applications/000-ibm-sync-resources/templates/01-ibm-suite-dns_Secret.yaml b/instance-applications/000-ibm-sync-resources/templates/01-ibm-suite-dns_Secret.yaml new file mode 100644 index 000000000..70de35285 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-ibm-suite-dns_Secret.yaml @@ -0,0 +1,22 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-suite-dns + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" + avp.kubernetes.io/remove-missing: 
"true" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +stringData: + cis_apikey: {{ .Values.cis_apikey }} + sm_aws_access_key_id: {{ .Values.sm_aws_access_key_id }} + sm_aws_secret_access_key: {{ .Values.sm_aws_secret_access_key }} +type: Opaque + +{{- end }} diff --git a/instance-applications/000-ibm-sync-resources/templates/01-sync-np_NetworkPolicy.yaml b/instance-applications/000-ibm-sync-resources/templates/01-sync-np_NetworkPolicy.yaml new file mode 100644 index 000000000..0e624a881 --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/templates/01-sync-np_NetworkPolicy.yaml @@ -0,0 +1,22 @@ +--- +# Permit outbound communication by the Job pods +# (Needed to communicate with the K8S HTTP API and AWS SM) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: sync-role-network-policy + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "01" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + app: "sync-job" + egress: + - {} + policyTypes: + - Egress \ No newline at end of file diff --git a/instance-applications/000-ibm-sync-resources/values.yaml b/instance-applications/000-ibm-sync-resources/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/instance-applications/000-ibm-sync-resources/values.yaml @@ -0,0 +1 @@ +--- diff --git a/instance-applications/010-ibm-sync-jobs/Chart.yaml b/instance-applications/010-ibm-sync-jobs/Chart.yaml new file mode 100644 index 000000000..0762ea393 --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: ibm-sync-jobs +description: | + Jobs that perform various setup/teardown tasks at the MAS instance level. + NOTE: supported by resources (RBAC, NetworkPolicies, Secrets, etc) in the 90-ibm-sync-resources chart. 
+ Supporting resources MUST be managed by an ArgoCD application in a lower syncwave so they are left intact during + execution of the PostDelete hooks in this chart. +type: application +version: 1.0.0 diff --git a/instance-applications/010-ibm-sync-jobs/README.md b/instance-applications/010-ibm-sync-jobs/README.md new file mode 100644 index 000000000..08d869aea --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/README.md @@ -0,0 +1,8 @@ +IBM MAS Sync Jobs +=============================================================================== +Instantiated by the /gitops/root-applications/ibm-mas-instance-root/templates/91-ibm-sync-jobs.yaml root application. + +Defines Jobs to perform various tasks that need to happen before ibm-sls and the suite are installed, and after they are removed. + +Supporting resources are defined in the 90-ibm-sync-resources chart which is managed by an application with a lower syncwave (90). +This is to ensure that these resources persist long enough for any PostDelete hooks in this chart to complete. diff --git a/instance-applications/010-ibm-sync-jobs/templates/00-aws-docdb-add-user_Job.yaml b/instance-applications/010-ibm-sync-jobs/templates/00-aws-docdb-add-user_Job.yaml new file mode 100644 index 000000000..84ab970e2 --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/00-aws-docdb-add-user_Job.yaml @@ -0,0 +1,153 @@ +{{- if not (empty .Values.docdb) }} +--- + +# Username of generated user will always be "masinst_${MAS_INSTANCE_ID}" +# Password will be set to whatever is in the acc/cluster/instance/mongo#password secret at time of sync, or generated if the secret is not set +# adler32sum of docdb config is included in job name; this means it will be rerun whenever docdb config changes +# The job is idempotent in cases where the config change is inconsequential (e.g. a cert alias is changed). 
+# If the config is modified to point to a different docdb instance, the user created in the previous DocDB will not be cleaned up + +apiVersion: batch/v1 +kind: Job +metadata: + name: aws-docdb-add-user-{{ .Values.docdb | toYaml | adler32sum }} + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "00" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: "sync-job" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: aws-docdb-process-user + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + env: + + - name: ACCOUNT_ID + value: "{{ .Values.account_id }}" + - name: CLUSTER_ID + value: "{{ .Values.cluster_id }}" + - name: MAS_INSTANCE_ID + value: "{{ .Values.instance_id }}" + + - name: DOCDB_MASTER_USERNAME + valueFrom: + secretKeyRef: + name: aws-docdb + key: docdb_master_username + - name: DOCDB_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: aws-docdb + key: docdb_master_password + - name: DOCDB_INSTANCE_PASSWORD + valueFrom: + secretKeyRef: + name: aws-docdb + key: docdb_instance_password + optional: true + - name: DOCDB_MASTER_INFO + valueFrom: + secretKeyRef: + name: aws-docdb + key: docdb_master_info + - name: SM_AWS_REGION + valueFrom: + secretKeyRef: + name: aws + key: aws_default_region + - name: SM_AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws + key: aws_access_key_id + - name: SM_AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws + key: aws_secret_access_key + + command: + - /bin/sh + - -c + - | + + set -e + + + echo + echo "================================================================================" + echo "/opt/app-root/src/run-role.sh aws_documentdb_user" + echo "================================================================================" + + export 
MAS_CONFIG_DIR="/tmp/${MAS_INSTANCE_ID}/aws_documentdb_user" + OUTPUT_FILE=${MAS_CONFIG_DIR}/docdb-${MAS_INSTANCE_ID}-instance-credentials.yml + export USER_ACTION="add" + + # Grab one of the hosts/ports out of docdb master info + export DOCDB_HOST=$(echo "${DOCDB_MASTER_INFO}" | sed 's/\\n/\n/g' | sed 's/\\"//g' | /usr/bin/yq '.config.hosts[0].host') + export DOCDB_PORT=$(echo "${DOCDB_MASTER_INFO}" | sed 's/\\n/\n/g' | /usr/bin/yq '.config.hosts[0].port') + + echo "Params:" + echo " - MAS_INSTANCE_ID ................... ${MAS_INSTANCE_ID}" + echo " - MAS_CONFIG_DIR ................... ${MAS_CONFIG_DIR}" + echo " - DOCDB_HOST ................... ${DOCDB_HOST}" + echo " - DOCDB_PORT ................... ${DOCDB_PORT}" + echo " - DOCDB_MASTER_USERNAME ................... ${DOCDB_MASTER_USERNAME:0:2}" + echo " - DOCDB_MASTER_PASSWORD ................... ${DOCDB_MASTER_PASSWORD:0:2}" + echo " - DOCDB_INSTANCE_PASSWORD ................... ${DOCDB_INSTANCE_PASSWORD:0:2}" + echo " - USER_ACTION ................... ${USER_ACTION}" + echo " - OUTPUT_FILE ................... ${OUTPUT_FILE}" + echo + + mkdir -p ${MAS_CONFIG_DIR} + /opt/app-root/src/run-role.sh aws_documentdb_user || exit $? 
+ + # The role should have created a file ${OUTPUT_FILE} containing the username/password it applied + # Read these so we can set the values in the instance's mongo secret in AWS SM + DOCDB_INSTANCE_USERNAME=$(/usr/bin/yq '.data.docdb_username' $OUTPUT_FILE | base64 -d) + DOCDB_INSTANCE_PASSWORD=$(/usr/bin/yq '.data.docdb_password' $OUTPUT_FILE | base64 -d) + + + echo + echo "================================================================================" + echo "Updating Instance Mongo Secret" + echo "================================================================================" + + SECRETS_KEY_SEPERATOR="/" + SECRET_NAME_MONGO=${ACCOUNT_ID}${SECRETS_KEY_SEPERATOR}${CLUSTER_ID}${SECRETS_KEY_SEPERATOR}${MAS_INSTANCE_ID}${SECRETS_KEY_SEPERATOR}mongo + DOCDB_MASTER_INFO_ESCAPED=${DOCDB_MASTER_INFO//\"/\\\"} + DOCDB_MASTER_INFO_ESCAPED=${DOCDB_MASTER_INFO_ESCAPED//$'\n'/\\n} + + echo "Params:" + echo " - ACCOUNT_ID ................... ${ACCOUNT_ID}" + echo " - CLUSTER_ID ................... ${CLUSTER_ID}" + echo " - MAS_INSTANCE_ID ................... ${MAS_INSTANCE_ID}" + echo " - SECRET_NAME_MONGO ................... ${SECRET_NAME_MONGO}" + echo " - SM_AWS_REGION ................... ${SM_AWS_REGION}" + echo " - SM_AWS_ACCESS_KEY_ID ................... ${SM_AWS_ACCESS_KEY_ID:0:2}" + echo " - SM_AWS_SECRET_ACCESS_KEY ................... ${SM_AWS_SECRET_ACCESS_KEY:0:2}" + echo " - DOCDB_INSTANCE_USERNAME ................... ${DOCDB_INSTANCE_USERNAME:0:2}" + echo " - DOCDB_INSTANCE_PASSWORD ................... ${DOCDB_INSTANCE_PASSWORD:0:2}" + echo " - DOCDB_MASTER_INFO_ESCAPED ................... ${DOCDB_MASTER_INFO_ESCAPED}" + echo + + + source /mascli/functions/gitops_utils + sm_login + sm_update_secret $SECRET_NAME_MONGO "{\"info\":\"$DOCDB_MASTER_INFO_ESCAPED\", \"username\":\"$DOCDB_INSTANCE_USERNAME\", \"password\":\"$DOCDB_INSTANCE_PASSWORD\"}" || exit $? 
+ + restartPolicy: Never + serviceAccountName: aws-docdb-user-job + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/010-ibm-sync-jobs/templates/00-placeholder_ConfigMap.yaml b/instance-applications/010-ibm-sync-jobs/templates/00-placeholder_ConfigMap.yaml new file mode 100644 index 000000000..64caab87e --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/00-placeholder_ConfigMap.yaml @@ -0,0 +1,16 @@ +--- +# This is to prevent AVP from complaining about there being no manifests +# if none of the other resources in this chart end up being rendered +apiVersion: v1 +kind: ConfigMap +metadata: + name: placeholder + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "00" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + nodata: "" \ No newline at end of file diff --git a/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-olm-480.yaml b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-olm-480.yaml new file mode 100644 index 000000000..834109335 --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-olm-480.yaml @@ -0,0 +1,1037 @@ +{{- if eq .Values.cpd_product_version "4.8.0" }} +--- +# This config map is created via cpd-cli manage apply-cr command: https://www.ibm.com/docs/en/cloud-paks/cp-data/4.6.x?topic=si-installing-components +kind: ConfigMap +apiVersion: v1 +metadata: + name: olm-utils-cm + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "00" + labels: + app.kubernetes.io/name: olm-utils +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + release_components_meta: | + analyticsengine: + case_version: 8.0.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + bigsql: + case_version: 10.0.0 + cr_version: 7.6.0 + csv_version: 10.0.0 + sub_channel: v10.0 + canvasbase: + 
case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + ccs: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + cognos_analytics: + case_version: 25.0.0 + cr_version: 25.0.0 + csv_version: 25.0.0 + sub_channel: v25.0 + cpd_platform: + case_version: 4.0.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + cpfs: + case_version: 4.3.0 + csv_version: 4.3.0 + sub_channel: v4.3 + dashboard: + case_version: 2.0.0 + cr_version: 4.8.0 + csv_version: 2.0.0 + sub_channel: v2.0 + data_governor: + case_version: 4.0.4 + csv_version: 4.0.4 + sub_channel: v4.0 + datagate: + case_version: 7.0.0 + cr_version: 5.0.0 + csv_version: 5.0.0 + sub_channel: v5.0 + datarefinery: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + datastage_ent: + case_version: 7.0.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + datastage_ent_plus: + case_version: 7.0.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + db2aaservice: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + db2oltp: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + db2u: + case_version: 5.5.0 + csv_version: 5.0.0 + sub_channel: v5.0 + db2wh: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + dfo: + case_version: 1.0.0 + cr_version: 1.0.0 + csv_version: 1.0.0 + sub_channel: beta + dmc: + case_version: 7.0.0 + cr_version: 4.8.0 + csv_version: 4.0.0 + sub_channel: v4.0 + dods: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + dp: + case_version: 8.0.0 + cr_version: 4.8.0 + csv_version: 8.0.0 + sub_channel: v8.0 + dpra: + case_version: 1.12.0 + cr_version: 1.12.0 + csv_version: 1.12.0 + sub_channel: v1.12 + dv: + case_version: 4.0.0 + cr_version: 2.2.0 + csv_version: 4.0.0 + sub_channel: v4.0 + edb_cp4d: + case_version: 4.18.0 + cr_version: 4.18.0 + csv_version: 
4.18.0 + sub_channel: v4.18 + estap: + case_version: 1.1.0 + cr_version: 1.1.0 + csv_version: 1.1.0 + sub_channel: v1.1 + factsheet: + case_version: 3.0.0 + cr_version: 4.8.0 + csv_version: 3.0.0 + sub_channel: v3.0 + fdb_k8s: + csv_version: 3.1.5 + sub_channel: v3.1 + hee: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 4.80.0 + sub_channel: v4.80 + ibm-cert-manager: + case_version: 4.2.1 + csv_version: 4.2.1 + sub_channel: v4.2 + ibm-licensing: + case_version: 4.2.3 + csv_version: 4.2.3 + sub_channel: v4.2 + ibm_events_operator: + case_version: 4.8.0 + csv_version: 4.8.0 + sub_channel: v3 + ibm_redis_cp: + case_version: 1.1.3 + csv_version: 1.1.3 + sub_channel: v1.1 + informix: + case_version: 7.0.0 + csv_version: 7.0.0 + sub_channel: v7.0 + informix_cp4d: + case_version: 7.0.0 + cr_version: 7.0.0 + csv_version: 7.0.0 + sub_channel: v7.0 + mantaflow: + case_version: 1.15.0 + cr_version: 42.0.5 + csv_version: 1.15.0 + sub_channel: v1.15 + match360: + case_version: 3.3.0 + cr_version: 3.3.15 + csv_version: 3.3.0 + sub_channel: v3.3 + model_train: + case_version: 1.2.11 + csv_version: 1.1.13 + sub_channel: v1.1 + mongodb: + case_version: 4.18.0 + csv_version: 1.22.0 + sub_channel: stable + mongodb_cp4d: + case_version: 4.18.0 + cr_version: 4.18.0 + csv_version: 4.18.0 + sub_channel: v4.18 + opencontent_auditwebhook: + case_version: 1.0.24 + csv_version: 0.3.1 + opencontent_elasticsearch: + case_version: 1.1.1845 + cr_version: 1.1.1845 + csv_version: 1.1.1845 + sub_channel: v1.1 + opencontent_etcd: + case_version: 2.0.31 + csv_version: 1.0.22 + opencontent_fdb: + case_version: 3.1.5 + cr_version: 3.1.5 + csv_version: 3.1.5 + sub_channel: v3.1 + opencontent_minio: + case_version: 1.0.23 + csv_version: 1.0.18 + opencontent_rabbitmq: + case_version: 1.0.31 + csv_version: 1.0.22 + sub_channel: v1.0 + opencontent_redis: + case_version: 1.6.11 + csv_version: 1.6.11 + sub_channel: v1.6 + openpages: + case_version: 6.0.0 + cr_version: 9.000.1 + csv_version: 6.0.0 + 
sub_channel: v6.0 + openpages_instance: + cr_version: 9.000.1 + openscale: + case_version: 6.0.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + planning_analytics: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 4.8.0 + sub_channel: v7.0 + postgresql: + case_version: 4.18.0 + csv_version: 1.18.7 + sub_channel: stable + productmaster: + case_version: 5.0.0 + cr_version: 5.0.0 + csv_version: 5.0.0 + sub_channel: v5.0 + productmaster_instance: + cr_version: 5.0.0 + replication: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 3.0.0 + licenses_urls: + IDRC: https://ibm.biz/BdSZ39 + IDRM: https://ibm.biz/BdSZ33 + IIDRC: https://ibm.biz/BdSZ3C + IIDRM: https://ibm.biz/BdSZ3T + sub_channel: v3.0 + rstudio: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + scheduler: + case_version: 1.18.0 + cr_version: 1.18.0 + csv_version: 1.18.0 + sub_channel: v1.18 + spss: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + syntheticdata: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + voice_gateway: + case_version: 1.3.6 + cr_version: 1.3.6 + csv_version: 1.3.6 + sub_channel: v1.36 + watson_assistant: + case_version: 4.21.0 + cr_version: 4.8.0 + csv_version: 4.21.0 + sub_channel: v4.21 + watson_discovery: + case_version: 7.0.0 + cr_version: 4.8.0 + csv_version: 7.0.0 + sub_channel: v7.0 + watson_gateway: + case_version: 2.0.27 + csv_version: 1.0.22 + watson_speech: + case_version: 7.0.1 + cr_version: 4.8.0 + csv_version: 7.0.1 + sub_channel: v7.0 + watsonx_ai: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + licenses_urls: + WX: https://ibm.biz/BdSR6v + sub_channel: v8.0 + watsonx_ai_ifm: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + licenses_urls: + WX: https://ibm.biz/BdSR6v + sub_channel: v8.0 + watsonx_data: + case_version: 2.0.0 + cr_version: 1.1.0 + csv_version: 2.0.0 + licenses_urls: + WX: 
https://ibm.biz/BdSuVk + sub_channel: v2.0 + support_online_upgrade: false + wkc: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 1.8.0 + rules: + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenextensions/status + verbs: + - get + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenextension + - zenextensions + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - '' + - batch + - extensions + - apps + - policy + - rbac.authorization.k8s.io + - autoscaling + - route.openshift.io + - authorization.openshift.io + - networking.k8s.io + - metrics.k8s.io + - project.openshift.io + resources: + - pods + - pods/log + - poddisruptionbudgets + - secrets + - jobs + - configmaps + - deployments + - deployments/scale + - daemonsets + - projects + - statefulsets + - statefulsets/scale + - replicasets + - services + - services/finalizers + - persistentvolumeclaims + - cronjobs + - pods/exec + - pods/portforward + - serviceaccounts + - namespaces + - roles + - rolebindings + - horizontalpodautoscalers + - routes + - routes/custom-host + - ingresses + - endpoints + - cronjob + - networkpolicies + - events + - jobs/status + - pods/status + - resourcequotas + - resourcequotas/status + verbs: + - apply + - create + - get + - delete + - watch + - update + - edit + - exec + - list + - patch + - deletecollection + - apiGroups: + - cpd.ibm.com + resources: + - cpdinstalls + - cpdinstalls/spec + - cpdinstalls/status + verbs: + - apply + - create + - delete + - edit + - get + - list + - patch + - update + - watch + - apiGroups: + - build.openshift.io + resources: + - buildconfigs + - buildconfigs/instantiate + - buildconfigs/instantiatebinary + - buildconfigs/webhooks + - buildlogs + - builds + - builds/clone + - builds/details + - builds/log + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - image.openshift.io + resources: + - imagestreams + - imagestreams/layers + - 
imagestreams/secrets + - imagestreams/status + - imagestreamimages + - imagestreamimports + - imagestreammappings + - imagestreamtags + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - '' + resources: + - pods + verbs: + - get + - apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - apiGroups: + - wkc.cpd.ibm.com + resources: + - wkc + - wkc/spec + - wkc/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenservices + - zenservices/spec + - zenservices/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ccs.cpd.ibm.com + resources: + - ccs + - ccs/spec + - ccs/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ae.cpd.ibm.com + resources: + - analyticsengines + - analyticsengines/spec + - analyticsengines/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ds.cpd.ibm.com + resources: + - datastages + - datastages/spec + - datastages/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - datarefinery.cpd.ibm.com + resources: + - datarefinery + - datarefinery/spec + - datarefinery/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - databases.cpd.ibm.com + resources: + - db2aaserviceservices + - db2aaserviceservices/spec + - db2aaserviceservices/status + - db2aaserviceservices/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - db2u.databases.ibm.com + resources: + - db2uclusters + - db2uclusters/spec + - db2uclusters/status + - db2uclusters/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - iis.cpd.ibm.com + resources: + - iis + - 
iis/spec + - iis/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ug.wkc.cpd.ibm.com + resources: + - ug + - ug/spec + - ug/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - operator.ibm.com + resources: + - operandrequests + - operandregistries + - operandconfigs + - operandbindinfos + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + - apiGroups: + - apps.foundationdb.org + resources: + - foundationdbclusters + - foundationdbbackups + - foundationdbrestores + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - apps.foundationdb.org + resources: + - foundationdbclusters/status + - foundationdbbackups/status + verbs: + - get + - update + - patch + - apiGroups: + - foundationdb.opencontent.ibm.com + resources: + - fdbclusters + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - foundationdb.opencontent.ibm.com + resources: + - fdbclusters/finalizers + verbs: + - update + - apiGroups: + - foundationdb.opencontent.ibm.com + resources: + - fdbclusters/status + verbs: + - get + - patch + - update + - apiGroups: + - certmanager.k8s.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ibmcpcs.ibm.com + resources: + - secretshares + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + sub_channel: v5.0 + wml: + case_version: 8.0.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + wml_accelerator: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + ws: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + ws_pipelines: + case_version: 8.0.0 + cr_version: 4.8.0 
+ csv_version: 8.0.0 + sub_channel: v8.0 + ws_runtimes: + case_version: 8.0.0 + cr_version: 8.0.0 + csv_version: 8.0.0 + sub_channel: v8.0 + wxd_optimizerplus: + component_dependencies: + - db2u + zen: + cr_version: 5.1.0 + csv_version: 5.1.0 + rules: + - apiGroups: + - '' + - batch + - extensions + - apps + - policy + - rbac.authorization.k8s.io + - autoscaling + - route.openshift.io + - authorization.openshift.io + - networking.k8s.io + - metrics.k8s.io + - project.openshift.io + - template.openshift.io + - autoscaling.k8s.io + resources: + - pods + - pods/log + - pods/eviction + - poddisruptionbudgets + - projects + - secrets + - jobs + - configmaps + - deployments + - deployments/scale + - daemonsets + - statefulsets + - statefulsets/scale + - replicasets + - replicationcontrollers + - services + - services/finalizers + - persistentvolumes + - persistentvolumeclaims + - cronjobs + - pods/exec + - pods/portforward + - serviceaccounts + - namespaces + - roles + - rolebindings + - horizontalpodautoscalers + - verticalpodautoscalers + - routes + - routes/custom-host + - ingresses + - endpoints + - cronjob + - networkpolicies + - events + - jobs/status + - pods/status + - resourcequotas + - resourcequotas/status + - processedtemplates + verbs: + - create + - get + - delete + - watch + - update + - list + - patch + - deletecollection + - apiGroups: + - cpd.ibm.com + resources: + - cpdinstalls + - cpdinstalls/spec + - cpdinstalls/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - build.openshift.io + resources: + - buildconfigs + - buildconfigs/instantiate + - buildconfigs/instantiatebinary + - buildconfigs/webhooks + - buildlogs + - builds + - builds/clone + - builds/details + - builds/log + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - image.openshift.io + resources: + - imagestreams + - imagestreams/layers + - imagestreams/secrets + - imagestreams/status + - 
imagestreamimages + - imagestreamimports + - imagestreammappings + - imagestreamtags + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - apps + resourceNames: + - cpd-zen-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenservice + - zenservices + - zenservice/status + - zenservices/status + - zenextension + - zenextensions + - zenextension/status + - zenextensions/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ibm.com + resources: + - paralleljob + - paralleljobs + - paralleljob/status + - paralleljobs/status + verbs: + - get + - list + - apiGroups: + - operator.ibm.com + resources: + - commonservices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - certmanager.k8s.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - cert-manager.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - oidc.security.ibm.com + resources: + - client + - clients + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + - apiGroups: + - operator.ibm.com + resources: + - operandrequest + - operandrequests + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + - apiGroups: + - operators.coreos.com + resources: + - clusterserviceversions + verbs: + - get + - list + - watch + - apiGroups: + - operators.coreos.com + resources: + - operatorconditions + - operatorconditions/status + verbs: + - get + - list + - watch + - update + - patch + - 
apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - ibm.com + resources: + - resourceplans + - resourcematches + verbs: + - get + - list + - watch + - update + - patch + - create + - delete + - deletecollection + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + release_version: 4.8.0 + +{{- end }} diff --git a/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-olm-481.yaml b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-olm-481.yaml new file mode 100644 index 000000000..a08833691 --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-olm-481.yaml @@ -0,0 +1,1035 @@ +{{- if eq .Values.cpd_product_version "4.8.1" }} +--- +# This config map is created via cpd-cli manage apply-cr command: https://www.ibm.com/docs/en/cloud-paks/cp-data/4.6.x?topic=si-installing-components +kind: ConfigMap +apiVersion: v1 +metadata: + name: olm-utils-cm + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "00" + labels: + app.kubernetes.io/name: olm-utils +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + release_components_meta: | + analyticsengine: + case_version: 8.1.0 + cr_version: 4.8.1 + csv_version: 5.1.0 + sub_channel: v5.1 + bigsql: + case_version: 10.0.0 + cr_version: 7.6.0 + csv_version: 10.0.0 + sub_channel: v10.0 + canvasbase: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + ccs: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + cognos_analytics: + case_version: 25.0.0 + cr_version: 25.0.0 + csv_version: 25.0.0 + sub_channel: v25.0 + cpd_platform: + case_version: 4.1.0 + cr_version: 4.8.1 + csv_version: 5.1.0 + sub_channel: v5.1 + cpfs: + case_version: 4.3.0 + csv_version: 4.3.0 + 
sub_channel: v4.3 + dashboard: + case_version: 2.1.0 + cr_version: 4.8.1 + csv_version: 2.1.0 + sub_channel: v2.1 + data_governor: + case_version: 4.1.0 + csv_version: 4.1.0 + sub_channel: v4.1 + datagate: + case_version: 7.1.0 + cr_version: 5.1.0 + csv_version: 5.1.0 + sub_channel: v5.1 + datarefinery: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + datastage_ent: + case_version: 7.1.0 + cr_version: 4.8.1 + csv_version: 5.1.0 + sub_channel: v5.1 + datastage_ent_plus: + case_version: 7.1.0 + cr_version: 4.8.1 + csv_version: 5.1.0 + sub_channel: v5.1 + db2aaservice: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + db2oltp: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + db2u: + case_version: 5.5.0 + csv_version: 5.0.0 + sub_channel: v5.0 + db2wh: + case_version: 4.8.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + dfo: + case_version: 1.0.0 + cr_version: 1.0.0 + csv_version: 1.0.0 + sub_channel: beta + dmc: + case_version: 7.0.0 + cr_version: 4.8.0 + csv_version: 4.0.0 + sub_channel: v4.0 + dods: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + dp: + case_version: 8.1.0 + cr_version: 4.8.1 + csv_version: 8.1.0 + sub_channel: v8.1 + dpra: + case_version: 1.13.0 + cr_version: 1.13.0 + csv_version: 1.13.0 + sub_channel: v1.13 + dv: + case_version: 4.0.0 + cr_version: 2.2.0 + csv_version: 4.0.0 + sub_channel: v4.0 + edb_cp4d: + case_version: 4.19.0 + cr_version: 4.19.0 + csv_version: 4.19.0 + sub_channel: v4.19 + estap: + case_version: 1.1.0 + cr_version: 1.1.0 + csv_version: 1.1.0 + sub_channel: v1.1 + factsheet: + case_version: 3.1.0 + cr_version: 4.8.1 + csv_version: 3.1.0 + sub_channel: v3.1 + fdb_k8s: + csv_version: 3.1.6 + sub_channel: v3.1 + hee: + case_version: 4.8.1 + cr_version: 4.8.1 + csv_version: 4.81.0 + sub_channel: v4.81 + ibm-cert-manager: + case_version: 4.2.1 + csv_version: 4.2.1 + 
sub_channel: v4.2 + ibm-licensing: + case_version: 4.2.3 + csv_version: 4.2.3 + sub_channel: v4.2 + ibm_events_operator: + case_version: 4.9.0 + csv_version: 4.9.0 + sub_channel: v3 + ibm_redis_cp: + case_version: 1.1.4 + csv_version: 1.1.4 + sub_channel: v1.1 + informix: + case_version: 7.0.0 + csv_version: 7.0.0 + sub_channel: v7.0 + informix_cp4d: + case_version: 7.0.0 + cr_version: 7.0.0 + csv_version: 7.0.0 + sub_channel: v7.0 + mantaflow: + case_version: 1.16.0 + cr_version: 42.1.0 + csv_version: 1.16.0 + sub_channel: v1.16 + match360: + case_version: 3.4.0 + cr_version: 3.4.36 + csv_version: 3.4.0 + sub_channel: v3.4 + model_train: + case_version: 1.2.11 + csv_version: 1.1.13 + sub_channel: v1.1 + mongodb: + case_version: 4.19.0 + csv_version: 1.23.0 + sub_channel: stable + mongodb_cp4d: + case_version: 4.19.0 + cr_version: 4.19.0 + csv_version: 4.19.0 + sub_channel: v4.19 + opencontent_auditwebhook: + case_version: 1.0.24 + csv_version: 0.3.1 + opencontent_elasticsearch: + case_version: 1.1.1845 + cr_version: 1.1.1845 + csv_version: 1.1.1845 + sub_channel: v1.1 + opencontent_etcd: + case_version: 2.0.31 + csv_version: 1.0.22 + opencontent_fdb: + case_version: 3.1.6 + cr_version: 3.1.6 + csv_version: 3.1.6 + sub_channel: v3.1 + opencontent_minio: + case_version: 1.0.23 + csv_version: 1.0.18 + opencontent_rabbitmq: + case_version: 1.0.31 + csv_version: 1.0.22 + sub_channel: v1.0 + opencontent_redis: + case_version: 1.6.11 + csv_version: 1.6.11 + sub_channel: v1.6 + openpages: + case_version: 6.0.0 + cr_version: 9.000.1 + csv_version: 6.0.0 + sub_channel: v6.0 + openpages_instance: + cr_version: 9.000.1 + openscale: + case_version: 6.0.0 + cr_version: 4.8.0 + csv_version: 5.0.0 + sub_channel: v5.0 + planning_analytics: + case_version: 4.8.1 + cr_version: 4.8.1 + csv_version: 4.8.1 + sub_channel: v7.1 + postgresql: + case_version: 4.18.0 + csv_version: 1.18.7 + sub_channel: stable + productmaster: + case_version: 5.1.0 + cr_version: 5.1.0 + csv_version: 5.1.0 + 
sub_channel: v5.1 + productmaster_instance: + cr_version: 5.1.0 + replication: + case_version: 4.8.1 + cr_version: 4.8.1 + csv_version: 3.1.0 + licenses_urls: + IDRC: https://ibm.biz/BdSZ39 + IDRM: https://ibm.biz/BdSZ33 + IIDRC: https://ibm.biz/BdSZ3C + IIDRM: https://ibm.biz/BdSZ3T + sub_channel: v3.1 + rstudio: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + scheduler: + case_version: 1.19.0 + cr_version: 1.19.0 + csv_version: 1.19.0 + sub_channel: v1.19 + spss: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + syntheticdata: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + voice_gateway: + case_version: 1.3.6 + cr_version: 1.3.6 + csv_version: 1.3.6 + sub_channel: v1.36 + watson_assistant: + case_version: 4.21.0 + cr_version: 4.8.0 + csv_version: 4.21.0 + sub_channel: v4.21 + watson_discovery: + case_version: 7.0.0 + cr_version: 4.8.0 + csv_version: 7.0.0 + sub_channel: v7.0 + watson_gateway: + case_version: 2.0.27 + csv_version: 1.0.22 + watson_speech: + case_version: 7.0.1 + cr_version: 4.8.0 + csv_version: 7.0.1 + sub_channel: v7.0 + watsonx_ai: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + licenses_urls: + WX: https://ibm.biz/BdSV76 + sub_channel: v8.1 + watsonx_ai_ifm: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + licenses_urls: + WX: https://ibm.biz/BdSV76 + sub_channel: v8.1 + watsonx_data: + case_version: 2.1.0 + cr_version: 1.1.1 + csv_version: 2.1.0 + licenses_urls: + WX: https://ibm.biz/BdSJxS + sub_channel: v2.1 + wkc: + case_version: 4.8.1 + cr_version: 4.8.1 + csv_version: 1.8.1 + rules: + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenextensions/status + verbs: + - get + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenextension + - zenextensions + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - '' + - batch + - extensions + - apps + - policy + - 
rbac.authorization.k8s.io + - autoscaling + - route.openshift.io + - authorization.openshift.io + - networking.k8s.io + - metrics.k8s.io + - project.openshift.io + resources: + - pods + - pods/log + - poddisruptionbudgets + - secrets + - jobs + - configmaps + - deployments + - deployments/scale + - daemonsets + - projects + - statefulsets + - statefulsets/scale + - replicasets + - services + - services/finalizers + - persistentvolumeclaims + - cronjobs + - pods/exec + - pods/portforward + - serviceaccounts + - namespaces + - roles + - rolebindings + - horizontalpodautoscalers + - routes + - routes/custom-host + - ingresses + - endpoints + - cronjob + - networkpolicies + - events + - jobs/status + - pods/status + - resourcequotas + - resourcequotas/status + verbs: + - apply + - create + - get + - delete + - watch + - update + - edit + - exec + - list + - patch + - deletecollection + - apiGroups: + - cpd.ibm.com + resources: + - cpdinstalls + - cpdinstalls/spec + - cpdinstalls/status + verbs: + - apply + - create + - delete + - edit + - get + - list + - patch + - update + - watch + - apiGroups: + - build.openshift.io + resources: + - buildconfigs + - buildconfigs/instantiate + - buildconfigs/instantiatebinary + - buildconfigs/webhooks + - buildlogs + - builds + - builds/clone + - builds/details + - builds/log + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - image.openshift.io + resources: + - imagestreams + - imagestreams/layers + - imagestreams/secrets + - imagestreams/status + - imagestreamimages + - imagestreamimports + - imagestreammappings + - imagestreamtags + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - '' + resources: + - pods + verbs: + - get + - apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - apiGroups: + - wkc.cpd.ibm.com + resources: + - wkc + - wkc/spec + - wkc/status + verbs: + - create + - delete + - get + - list + - 
patch + - update + - watch + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenservices + - zenservices/spec + - zenservices/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ccs.cpd.ibm.com + resources: + - ccs + - ccs/spec + - ccs/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ae.cpd.ibm.com + resources: + - analyticsengines + - analyticsengines/spec + - analyticsengines/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ds.cpd.ibm.com + resources: + - datastages + - datastages/spec + - datastages/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - datarefinery.cpd.ibm.com + resources: + - datarefinery + - datarefinery/spec + - datarefinery/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - databases.cpd.ibm.com + resources: + - db2aaserviceservices + - db2aaserviceservices/spec + - db2aaserviceservices/status + - db2aaserviceservices/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - db2u.databases.ibm.com + resources: + - db2uclusters + - db2uclusters/spec + - db2uclusters/status + - db2uclusters/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - iis.cpd.ibm.com + resources: + - iis + - iis/spec + - iis/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ug.wkc.cpd.ibm.com + resources: + - ug + - ug/spec + - ug/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - operator.ibm.com + resources: + - operandrequests + - operandregistries + - operandconfigs + - operandbindinfos + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + - apiGroups: 
+ - apps.foundationdb.org + resources: + - foundationdbclusters + - foundationdbbackups + - foundationdbrestores + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - apps.foundationdb.org + resources: + - foundationdbclusters/status + - foundationdbbackups/status + verbs: + - get + - update + - patch + - apiGroups: + - foundationdb.opencontent.ibm.com + resources: + - fdbclusters + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - foundationdb.opencontent.ibm.com + resources: + - fdbclusters/finalizers + verbs: + - update + - apiGroups: + - foundationdb.opencontent.ibm.com + resources: + - fdbclusters/status + verbs: + - get + - patch + - update + - apiGroups: + - certmanager.k8s.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ibmcpcs.ibm.com + resources: + - secretshares + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + sub_channel: v5.1 + wml: + case_version: 8.1.0 + cr_version: 4.8.1 + csv_version: 5.1.0 + sub_channel: v5.1 + wml_accelerator: + case_version: 4.8.1 + cr_version: 4.8.1 + csv_version: 5.1.0 + sub_channel: v5.1 + ws: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + ws_pipelines: + case_version: 8.1.0 + cr_version: 4.8.1 + csv_version: 8.1.0 + sub_channel: v8.1 + ws_runtimes: + case_version: 8.1.0 + cr_version: 8.1.0 + csv_version: 8.1.0 + sub_channel: v8.1 + wxd_optimizerplus: + component_dependencies: + - db2u + zen: + cr_version: 5.1.0 + csv_version: 5.1.0 + rules: + - apiGroups: + - '' + - batch + - extensions + - apps + - policy + - rbac.authorization.k8s.io + - autoscaling + - route.openshift.io + - authorization.openshift.io + - networking.k8s.io + - metrics.k8s.io + - project.openshift.io + - 
template.openshift.io + - autoscaling.k8s.io + resources: + - pods + - pods/log + - pods/eviction + - poddisruptionbudgets + - projects + - secrets + - jobs + - configmaps + - deployments + - deployments/scale + - daemonsets + - statefulsets + - statefulsets/scale + - replicasets + - replicationcontrollers + - services + - services/finalizers + - persistentvolumes + - persistentvolumeclaims + - cronjobs + - pods/exec + - pods/portforward + - serviceaccounts + - namespaces + - roles + - rolebindings + - horizontalpodautoscalers + - verticalpodautoscalers + - routes + - routes/custom-host + - ingresses + - endpoints + - cronjob + - networkpolicies + - events + - jobs/status + - pods/status + - resourcequotas + - resourcequotas/status + - processedtemplates + verbs: + - create + - get + - delete + - watch + - update + - list + - patch + - deletecollection + - apiGroups: + - cpd.ibm.com + resources: + - cpdinstalls + - cpdinstalls/spec + - cpdinstalls/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - build.openshift.io + resources: + - buildconfigs + - buildconfigs/instantiate + - buildconfigs/instantiatebinary + - buildconfigs/webhooks + - buildlogs + - builds + - builds/clone + - builds/details + - builds/log + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - image.openshift.io + resources: + - imagestreams + - imagestreams/layers + - imagestreams/secrets + - imagestreams/status + - imagestreamimages + - imagestreamimports + - imagestreammappings + - imagestreamtags + verbs: + - create + - delete + - list + - watch + - get + - patch + - update + - apiGroups: + - apps + resourceNames: + - cpd-zen-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - zen.cpd.ibm.com + resources: + - zenservice + - zenservices + - zenservice/status + - zenservices/status + - zenextension + - zenextensions + - zenextension/status + - zenextensions/status 
+ verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - ibm.com + resources: + - paralleljob + - paralleljobs + - paralleljob/status + - paralleljobs/status + verbs: + - get + - list + - apiGroups: + - operator.ibm.com + resources: + - commonservices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - certmanager.k8s.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - cert-manager.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - oidc.security.ibm.com + resources: + - client + - clients + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + - apiGroups: + - operator.ibm.com + resources: + - operandrequest + - operandrequests + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + - apiGroups: + - operators.coreos.com + resources: + - clusterserviceversions + verbs: + - get + - list + - watch + - apiGroups: + - operators.coreos.com + resources: + - operatorconditions + - operatorconditions/status + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - ibm.com + resources: + - resourceplans + - resourcematches + verbs: + - get + - list + - watch + - update + - patch + - create + - delete + - deletecollection + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - use + release_version: 4.8.1 +{{- end }} diff --git 
a/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-presync.yaml b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-presync.yaml new file mode 100644 index 000000000..b8f84fe12 --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-cp4d-presync.yaml @@ -0,0 +1,339 @@ +{{- if not (empty .Values.cpd_product_version) }} + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: presync-cpd-olm-sa + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "001" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: presync-cpd-olm-role-{{ .Values.instance_id }} + annotations: + argocd.argoproj.io/sync-wave: "001" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - list + apiGroups: + - packages.operators.coreos.com + - packages.operators.coreos.com/v1 + resources: + - packagemanifests + - verbs: + - get + apiGroups: + - '' + resources: + - configmaps + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: presync-cpd-olm-rb-{{ .Values.instance_id }} + annotations: + argocd.argoproj.io/sync-wave: "002" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: presync-cpd-olm-sa + namespace: mas-{{ .Values.instance_id }}-syncres +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: presync-cpd-olm-role-{{ .Values.instance_id }} + +--- +apiVersion: batch/v1 +kind: Job +metadata: + # Generate the job name by suffixing with a hash of all chart values + # This is to ensure that ArgoCD will delete and recreate the job if (and only if) anything changes + # Any change to cluster config will trigger a rerun of the job. 
+ # The job is idempotent and quick so no real harm in running it when we don't actually need to. + name: "presync-cpd-olm-job-{{ .Values | toYaml | adler32sum }}" + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "003" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: ACCOUNT_ID + value: {{ .Values.account_id }} + - name: REGION_ID + value: {{ .Values.region_id }} + - name: CLUSTER_ID + value: {{ .Values.cluster_id }} + - name: INSTANCE_ID + value: {{ .Values.instance_id }} + # Hard-coded for now: + - name: AVP_TYPE + value: "aws" + volumeMounts: + - name: aws + mountPath: /etc/mas/creds/aws + command: + - /bin/sh + - -c + - | + + + set -e + + # might as well take advantage of gitops_utils for sm_ functions as we're using the cli image + source /mascli/functions/gitops_utils + + # NOTE: cannot just render AWS secrets into here, as it will be exposed in the ArgoCD UI + # Instead, we pass them into a secret (ArgoCD knows to hide any data fields in k8s secrets), + # mount the secret on the jobs filesystem, and read them in here + SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/aws/aws_access_key_id) + SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/aws/aws_secret_access_key) + + echo + echo "================================================================================" + echo "Storing CP4D Dependencies' Channel and Version info in AWS Secret Manager" + echo "================================================================================" + + + echo + echo "Fetching channel for namespace scope from PackageManifest" + echo 
"================================================================================" + + export NAMESPACE_SCOPE_CHANNEL=$((oc get PackageManifest ibm-namespace-scope-operator -o json) | jq -r '[.status.channels | .[].name] | last') + if [[ -z "${NAMESPACE_SCOPE_CHANNEL}" ]]; then + echo "Failed to fetch namespace scope channel" + exit 1 + fi + + echo + echo "Fetching values from olm-utils-cm config map" + echo "================================================================================" + echo "Fetching IBM Licensing Channel..." + export LICENSING_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.["ibm-licensing"].sub_channel') + if [[ -z "${LICENSING_CHANNEL}" ]]; then + echo "Failed to fetch ibm licensing channel" + exit 1 + fi + + echo + echo "Fetching IBM Licensing Version..." + export LICENSING_VERSION=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.["ibm-licensing"].csv_version') + if [[ -z "${LICENSING_VERSION}" ]]; then + echo "Failed to fetch ibm licensing version" + exit 1 + fi + + echo + echo "Fetching CPFS Channel..." + export CPFS_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.cpfs.sub_channel') + if [[ -z "${CPFS_CHANNEL}" ]]; then + echo "Failed to fetch cpfs channel" + exit 1 + fi + + echo + echo "Fetching CPD Platform Channel..." + export CPD_PLATFORM_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.cpd_platform.sub_channel') + if [[ -z "${CPD_PLATFORM_CHANNEL}" ]]; then + echo "Failed to fetch cpd platform channel" + exit 1 + fi + + echo + echo "Fetching Watson Studio Version..." 
+ export WSL_VERSION=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.ws.cr_version') + if [[ -z "${WSL_VERSION}" ]]; then + echo "Failed to fetch watson studio version" + exit 1 + fi + + echo + echo "Fetching Watson Studio Channel..." + export WSL_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.ws.sub_channel') + if [[ -z "${WSL_CHANNEL}" ]]; then + echo "Failed to fetch watson studio channel" + exit 1 + fi + + echo + echo "Fetching CCS Version..." + export CCS_VERSION=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.ccs.cr_version') + if [[ -z "${CCS_VERSION}" ]]; then + echo "Failed to fetch ccs version" + exit 1 + fi + + echo + echo "Fetching CCS Channel..." + export CCS_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.ccs.sub_channel') + if [[ -z "${CCS_CHANNEL}" ]]; then + echo "Failed to fetch ccs channel" + exit 1 + fi + + echo + echo "Fetching Watson Studio Runtimes Version..." + export WS_RUNTIMES_VERSION=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.ws_runtimes.cr_version') + if [[ -z "${WS_RUNTIMES_VERSION}" ]]; then + echo "Failed to fetch watson studio runtimes version" + exit 1 + fi + + echo + echo "Fetching Watson Studio Runtimes Channel..." + export WS_RUNTIMES_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.ws_runtimes.sub_channel') + if [[ -z "${WS_RUNTIMES_CHANNEL}" ]]; then + echo "Failed to fetch watson studio runtimes channel" + exit 1 + fi + + echo + echo "Fetching DataRefinery Version..." 
+ export DATAREFINERY_VERSION=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.datarefinery.cr_version') + if [[ -z "${DATAREFINERY_VERSION}" ]]; then + echo "Failed to fetch datarefinery version" + exit 1 + fi + + echo + echo "Fetching DataRefinery Channel..." + export DATAREFINERY_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.datarefinery.sub_channel') + if [[ -z "${DATAREFINERY_CHANNEL}" ]]; then + echo "Failed to fetch datarefinery channel" + exit 1 + fi + + echo + echo "Fetching Opencontent Rabbitmq Channel..." + export RABBITMQ_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.opencontent_rabbitmq.sub_channel') + if [[ -z "${RABBITMQ_CHANNEL}" ]]; then + echo "Failed to fetch opencontent rabbitmq channel" + exit 1 + fi + + echo + echo "Fetching Opencontent Elasticsearch Channel..." + export ELASTICSEARCH_CHANNEL=$((oc get ConfigMap olm-utils-cm -o json) | yq -r '.data.release_components_meta' | yq -r '.opencontent_elasticsearch.sub_channel') + if [[ -z "${ELASTICSEARCH_CHANNEL}" ]]; then + echo "Failed to fetch opencontent elasticsearch channel" + exit 1 + fi + + + export SM_AWS_REGION=${REGION_ID} + sm_login + + SECRET_NAME_NAMESPACE_SCOPE_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/namespace_scope_channel + SECRET_NAME_LICENSING_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/licensing_channel + SECRET_NAME_LICENSING_VERSION=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/licensing_version + SECRET_NAME_CPFS_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/cpfs_channel + SECRET_NAME_CPD_PLATFORM_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/cpd_platform_channel + SECRET_NAME_WSL_VERSION=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/wsl_version + SECRET_NAME_WSL_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/wsl_channel + SECRET_NAME_CCS_VERSION=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/ccs_version + 
SECRET_NAME_CCS_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/ccs_channel + SECRET_NAME_WS_RUNTIMES_VERSION=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/ws_runtimes_version + SECRET_NAME_WS_RUNTIMES_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/ws_runtimes_channel + SECRET_NAME_DATAREFINERY_VERSION=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/datarefinery_version + SECRET_NAME_DATAREFINERY_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/datarefinery_channel + SECRET_NAME_RABBITMQ_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/opencontent_rabbitmq_channel + SECRET_NAME_ELASTICSEARCH_CHANNEL=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/opencontent_elasticsearch_channel + + echo + echo + echo "Updating secrets in Secret Manager" + echo "================================================================================" + + echo + echo "Debug:" + echo " - ACCOUNT_ID ............................................ ${ACCOUNT_ID}" + echo " - REGION_ID ............................................. ${REGION_ID}" + echo " - CLUSTER_ID ............................................ ${CLUSTER_ID}" + echo " - INSTANCE_ID ........................................... ${INSTANCE_ID}" + echo " - SECRET_NAME_NAMESPACE_SCOPE_CHANNEL ................... ${SECRET_NAME_NAMESPACE_SCOPE_CHANNEL}" + echo " - SECRET_NAME_LICENSING_CHANNEL ......................... ${SECRET_NAME_LICENSING_CHANNEL}" + echo " - SECRET_NAME_LICENSING_VERSION ......................... ${SECRET_NAME_LICENSING_VERSION}" + echo " - SECRET_NAME_CPFS_CHANNEL .............................. ${SECRET_NAME_CPFS_CHANNEL}" + echo " - SECRET_NAME_CPD_PLATFORM_CHANNEL ...................... ${SECRET_NAME_CPD_PLATFORM_CHANNEL}" + echo " - SECRET_NAME_WSL_VERSION ............................... ${SECRET_NAME_WSL_VERSION}" + echo " - SECRET_NAME_WSL_CHANNEL ............................... ${SECRET_NAME_WSL_CHANNEL}" + echo " - SECRET_NAME_CCS_VERSION ............................... 
${SECRET_NAME_CCS_VERSION}" + echo " - SECRET_NAME_CCS_CHANNEL ............................... ${SECRET_NAME_CCS_CHANNEL}" + echo " - SECRET_NAME_WS_RUNTIMES_VERSION ....................... ${SECRET_NAME_WS_RUNTIMES_VERSION}" + echo " - SECRET_NAME_WS_RUNTIMES_CHANNEL ....................... ${SECRET_NAME_WS_RUNTIMES_CHANNEL}" + echo " - SECRET_NAME_DATAREFINERY_VERSION ...................... ${SECRET_NAME_DATAREFINERY_VERSION}" + echo " - SECRET_NAME_DATAREFINERY_CHANNEL ...................... ${SECRET_NAME_DATAREFINERY_CHANNEL}" + echo " - SECRET_NAME_RABBITMQ_CHANNEL .......................... ${SECRET_NAME_RABBITMQ_CHANNEL}" + echo " - SECRET_NAME_ELASTICSEARCH_CHANNEL ..................... ${SECRET_NAME_ELASTICSEARCH_CHANNEL}" + echo + + + sm_update_secret $SECRET_NAME_NAMESPACE_SCOPE_CHANNEL "{\"namespace_scope_channel\": \"$NAMESPACE_SCOPE_CHANNEL\" }" + sm_update_secret $SECRET_NAME_LICENSING_CHANNEL "{\"licensing_channel\": \"$LICENSING_CHANNEL\" }" + sm_update_secret $SECRET_NAME_LICENSING_VERSION "{\"licensing_version\": \"$LICENSING_VERSION\" }" + sm_update_secret $SECRET_NAME_CPFS_CHANNEL "{\"cpfs_channel\": \"$CPFS_CHANNEL\" }" + sm_update_secret $SECRET_NAME_CPD_PLATFORM_CHANNEL "{\"cpd_platform_channel\": \"$CPD_PLATFORM_CHANNEL\" }" + sm_update_secret $SECRET_NAME_WSL_VERSION "{\"wsl_version\": \"$WSL_VERSION\" }" + sm_update_secret $SECRET_NAME_WSL_CHANNEL "{\"wsl_channel\": \"$WSL_CHANNEL\" }" + sm_update_secret $SECRET_NAME_CCS_VERSION "{\"ccs_version\": \"$CCS_VERSION\" }" + sm_update_secret $SECRET_NAME_CCS_CHANNEL "{\"ccs_channel\": \"$CCS_CHANNEL\" }" + sm_update_secret $SECRET_NAME_WS_RUNTIMES_VERSION "{\"ws_runtimes_version\": \"$WS_RUNTIMES_VERSION\" }" + sm_update_secret $SECRET_NAME_WS_RUNTIMES_CHANNEL "{\"ws_runtimes_channel\": \"$WS_RUNTIMES_CHANNEL\" }" + sm_update_secret $SECRET_NAME_DATAREFINERY_VERSION "{\"datarefinery_version\": \"$DATAREFINERY_VERSION\" }" + sm_update_secret $SECRET_NAME_DATAREFINERY_CHANNEL 
"{\"datarefinery_channel\": \"$DATAREFINERY_CHANNEL\" }" + sm_update_secret $SECRET_NAME_RABBITMQ_CHANNEL "{\"opencontent_rabbitmq_channel\": \"$RABBITMQ_CHANNEL\" }" + sm_update_secret $SECRET_NAME_ELASTICSEARCH_CHANNEL "{\"opencontent_elasticsearch_channel\": \"$ELASTICSEARCH_CHANNEL\" }" + + + echo "Done" + + + restartPolicy: Never + serviceAccountName: presync-cpd-olm-sa + volumes: + - name: aws + secret: + secretName: aws + defaultMode: 420 + optional: false + backoffLimit: 4 + +{{- end }} diff --git a/instance-applications/010-ibm-sync-jobs/templates/01-ibm-mas_suite_certs_Job.yaml b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-mas_suite_certs_Job.yaml new file mode 100644 index 000000000..de825c20a --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-mas_suite_certs_Job.yaml @@ -0,0 +1,146 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "True") }} +{{- if not (empty .Values.manual_certs) }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ibm-suite-certs-{{ .Values.manual_certs | toYaml | adler32sum }} + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "00" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + ttlSecondsAfterFinished: 120 + template: + metadata: + labels: + app: "sync-job" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: suite-certs-role-run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + + env: + - name: ACCOUNT_ID + value: "{{ .Values.account_id }}" + - name: CLUSTER_ID + value: "{{ .Values.cluster_id }}" + - name: MAS_INSTANCE_ID + value: "{{ .Values.instance_id }}" + + # dns + - name: DNS_PROVIDER + value: "{{ .Values.dns_provider }}" + - name: MAS_MANUAL_CERT_MGMT + value: "{{ .Values.mas_manual_cert_mgmt }}" + + - name: MAS_WORKSPACE_ID + value: "{{ 
.Values.mas_workspace_id }}" + - name: MAS_CONFIG_DIR + value: "{{ .Values.mas_config_dir }}" + - name: CIS_CRN + value: "{{ .Values.cis_crn }}" + - name: CIS_SUBDOMAIN + value: "{{ .Values.cis_subdomain }}" + - name: CIS_PROXY + value: "{{ .Values.cis_proxy }}" + + # Hard-coded for now: + - name: AVP_TYPE + value: "aws" + + volumeMounts: + - name: "ibm-suite-certs" + mountPath: /etc/mas/creds/suite_certs + command: + - /bin/sh + - -c + - | + + set -e + export MAS_CONFIG_DIR=${MAS_CONFIG_DIR:-"/tmp/suite_certs/configs"} + + export CIS_APIKEY=$(cat /etc/mas/creds/suite_certs/cis_apikey) + MAS_MANUAL_CERTS_YAML=/etc/mas/creds/suite_certs/manual_certs.yaml + + echo "" + echo "================================================================================" + echo "Settings" + echo "================================================================================" + echo "DNS Provider ........................ ${COLOR_MAGENTA}${DNS_PROVIDER}" + echo "Manual Cert Mgmt Flag ............... ${COLOR_MAGENTA}${MAS_MANUAL_CERT_MGMT}" + echo "MAS Instance Id ..................... ${COLOR_MAGENTA}${MAS_INSTANCE_ID}" + echo "MAS Workspace ID .................... ${COLOR_MAGENTA}${MAS_WORKSPACE_ID}" + echo "MAS CONFIG DIR ...................... ${COLOR_MAGENTA}${MAS_CONFIG_DIR}" + echo "CIS CRN ............................. ${COLOR_MAGENTA}${CIS_CRN}" + echo "IBM CLOUD APIKEY .................... ${COLOR_MAGENTA}${CIS_APIKEY:0:4}" + echo "CIS subdomain ....................... ${COLOR_MAGENTA}${CIS_SUBDOMAIN}" + echo "CIS proxy ........................... ${COLOR_MAGENTA}${CIS_PROXY}" + + echo "MAS Manual Certs YAML location ...... 
${COLOR_MAGENTA}${MAS_MANUAL_CERTS_YAML}" + + echo "" + echo "================================================================================" + + #create files as suite_cert role requires in MAS_CONFIG_DIR folder + appList=( + "core" + "assist" + "iot" + "manage" + "monitor" + "optimizer" + "predict" + "visualinspection" + ) + + if [[ -n "$MAS_MANUAL_CERTS_YAML" ]] && [[ -s "$MAS_MANUAL_CERTS_YAML" ]]; then + + for app in ${appList[@]}; do + + echo "Copying certs for $app into ${MAS_CONFIG_DIR}/certs/$app/ " + mkdir -p ${MAS_CONFIG_DIR}/certs/$app + + tls_crt_filter_query=.manual_certs.${app}_tls_crt + tls_key_filter_query=.manual_certs.${app}_tls_key + ca_crt_filter_query=.manual_certs.${app}_ca_crt + + /usr/bin/yq eval "$tls_crt_filter_query" ${MAS_MANUAL_CERTS_YAML} | base64 --decode > $MAS_CONFIG_DIR/certs/$app/tls.crt + /usr/bin/yq eval "$tls_key_filter_query" ${MAS_MANUAL_CERTS_YAML} | base64 --decode > $MAS_CONFIG_DIR/certs/$app/tls.key + /usr/bin/yq eval "$ca_crt_filter_query" ${MAS_MANUAL_CERTS_YAML} | base64 --decode > $MAS_CONFIG_DIR/certs/$app/ca.crt + + echo "Done" + done + fi + # Ref - https://github.com/ibm-mas/ansible-devops/blob/master/ibm/mas_devops/roles/suite_certs/README.md + # Set GITOPS flag so that suite_certs roles doesn't create the certificate resources but does do the dns management + export GITOPS=True + export ROLE_NAME=suite_certs + ansible-playbook ibm.mas_devops.run_role + rc=$? 
+ echo "Role suite_certs completes with rc=${rc}" + [ $rc -ne 0 ] && exit $rc + + exit 0 + + restartPolicy: Never + + serviceAccountName: ibm-suite-certs-job + volumes: + - name: "ibm-suite-certs" + secret: + secretName: "ibm-suite-certs" + defaultMode: 420 + optional: false + + backoffLimit: 4 + +{{- end }} +{{- end }} diff --git a/instance-applications/010-ibm-sync-jobs/templates/01-ibm-mas_suite_dns_Job.yaml b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-mas_suite_dns_Job.yaml new file mode 100644 index 000000000..255bf4ebc --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/01-ibm-mas_suite_dns_Job.yaml @@ -0,0 +1,182 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ibm-suite-dns + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/sync-wave: "00" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + ttlSecondsAfterFinished: 120 + template: + metadata: + labels: + app: "sync-job" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: suite-dns-role-run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + + env: + - name: ACCOUNT_ID + value: "{{ .Values.account_id }}" + - name: CLUSTER_ID + value: "{{ .Values.cluster_id }}" + - name: MAS_INSTANCE_ID + value: "{{ .Values.instance_id }}" + + # dns + - name: DNS_PROVIDER + value: "{{ .Values.dns_provider }}" + - name: MAS_MANUAL_CERT_MGMT + value: "{{ .Values.mas_manual_cert_mgmt }}" + + - name: MAS_WORKSPACE_ID + value: "{{ .Values.mas_workspace_id }}" + - name: MAS_DOMAIN + value: "{{ .Values.mas_domain }}" + - name: MAS_CONFIG_DIR + value: "{{ .Values.mas_config_dir }}" + + - name: CIS_EMAIL + value: "{{ .Values.cis_email }}" + - name: CIS_CRN + value: "{{ .Values.cis_crn }}" + - name: 
CIS_SUBDOMAIN + value: "{{ .Values.cis_subdomain }}" + - name: CIS_MAS_DOMAIN + value: "{{ .Values.cis_mas_domain }}" + + - name: OCP_CLUSTER_DOMAIN + value: "{{ .Values.ocp_cluster_domain }}" + - name: CIS_ENHANCED_SECURITY + value: "{{ .Values.cis_enhanced_security }}" + - name: CIS_WAF + value: "{{ .Values.cis_waf }}" + - name: CIS_PROXY + value: "{{ .Values.cis_proxy }}" + - name: CIS_SERVICE_NAME + value: "{{ .Values.cis_service_name }}" + + - name: UPDATE_DNS_ENTRIES + value: "{{ .Values.update_dns_entries }}" + - name: DELETE_WILDCARDS + value: "{{ .Values.delete_wildcards }}" + - name: OVERRIDE_EDGE_CERTS + value: "{{ .Values.override_edge_certs }}" + + - name: SM_AWS_REGION + value: "{{ .Values.sm_aws_region }}" + + # Hard-coded for now: + - name: AVP_TYPE + value: "aws" + + volumeMounts: + - name: "ibm-suite-dns" + mountPath: /etc/mas/creds/suite_dns + command: + - /bin/sh + - -c + - | + + set -e + + source /mascli/functions/gitops_utils + + export CIS_APIKEY=$(cat /etc/mas/creds/suite_dns/cis_apikey) + export SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/suite_dns/sm_aws_access_key_id) + export SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/suite_dns/sm_aws_secret_access_key) + + + echo "" + echo "================================================================================" + echo "Settings" + echo "================================================================================" + echo "DNS Provider ........................ ${COLOR_MAGENTA}${DNS_PROVIDER}" + echo "Manual Cert Mgmt Flag ............... ${COLOR_MAGENTA}${MAS_MANUAL_CERT_MGMT}" + echo "Account Id .......................... ${COLOR_MAGENTA}${ACCOUNT_ID}" + echo "Cluster Id .......................... ${COLOR_MAGENTA}${CLUSTER_ID}" + echo "MAS Instance Id ..................... ${COLOR_MAGENTA}${MAS_INSTANCE_ID}" + echo "MAS Workspace ID .................... ${COLOR_MAGENTA}${MAS_WORKSPACE_ID}" + echo "MAS CONFIG DIR ...................... 
${COLOR_MAGENTA}${MAS_CONFIG_DIR}" + echo "CIS Email ........................... ${COLOR_MAGENTA}${CIS_EMAIL}" + echo "CIS CRN ............................. ${COLOR_MAGENTA}${CIS_CRN}" + echo "IBM CLOUD APIKEY .................... ${COLOR_MAGENTA}${CIS_APIKEY:0:4}" + echo "MAS Domain .......................... ${COLOR_MAGENTA}${MAS_DOMAIN}" + echo "CIS MAS Domain ...................... ${COLOR_MAGENTA}${CIS_MAS_DOMAIN}" + echo "OCP Cluster Domain .................. ${COLOR_MAGENTA}${OCP_CLUSTER_DOMAIN}" + echo "CIS subdomain ....................... ${COLOR_MAGENTA}${CIS_SUBDOMAIN}" + echo "CIS enhanced security Flag........... ${COLOR_MAGENTA}${CIS_ENHANCED_SECURITY}" + echo "CIS WAF ............................. ${COLOR_MAGENTA}${CIS_WAF}" + echo "CIS proxy ........................... ${COLOR_MAGENTA}${CIS_PROXY}" + echo "CIS service name .................... ${COLOR_MAGENTA}${CIS_SERVICE_NAME}" + echo "update DNS entries Flag ............. ${COLOR_MAGENTA}${UPDATE_DNS_ENTRIES}" + echo "DELETE_WILDCARDS Flag ............... ${COLOR_MAGENTA}${DELETE_WILDCARDS}" + echo "OVERRIDE_EDGE_CERTS Flag ............ ${COLOR_MAGENTA}${OVERRIDE_EDGE_CERTS}" + + echo "SM_AWS_REGION ....................... ${COLOR_MAGENTA}${SM_AWS_REGION}" + echo "SM_AWS_ACCESS_KEY_ID ................ ${COLOR_MAGENTA}${SM_AWS_ACCESS_KEY_ID:0:4}" + echo "SM_AWS_SECRET_ACCESS_KEY ............ 
${COLOR_MAGENTA}${SM_AWS_SECRET_ACCESS_KEY:0:4}" + + echo "" + echo "================================================================================" + export MAS_DOMAIN=$CIS_MAS_DOMAIN + mkdir -p ${MAS_CONFIG_DIR} + + sm_login + export SECRETS_KEY_SEPERATOR="/" + export SECRET_NAME=${ACCOUNT_ID}${SECRETS_KEY_SEPERATOR}${CLUSTER_ID}${SECRETS_KEY_SEPERATOR}${MAS_INSTANCE_ID}${SECRETS_KEY_SEPERATOR}suite_dns_role + export SUITE_DNS_ROLE_SECRET_FILE=$MAS_CONFIG_DIR/suite-dns-role-secret.json + sm_get_secret_file $SECRET_NAME $SUITE_DNS_ROLE_SECRET_FILE + + SUITE_DNS_ROLE_COMPLETION_STATUS=$(jq -r .completion_status $SUITE_DNS_ROLE_SECRET_FILE) + SUITE_DNS_ROLE_COMPLETION_ON=$(jq -r .completion_on $SUITE_DNS_ROLE_SECRET_FILE) + + echo "SUITE_DNS_ROLE_COMPLETION_STATUS=${SUITE_DNS_ROLE_COMPLETION_STATUS}" + echo "SUITE_DNS_ROLE_COMPLETION_ON=${SUITE_DNS_ROLE_COMPLETION_ON}" + + if [ "$SUITE_DNS_ROLE_COMPLETION_STATUS" != "success" ]; then + # Ref - https://github.com/ibm-mas/ansible-devops/blob/master/ibm/mas_devops/roles/suite_dns/README.md + export ROLE_NAME=suite_dns + ansible-playbook ibm.mas_devops.run_role + rc=$? 
+ echo "suite_dns rc=${rc}" + [ $rc -ne 0 ] && exit $rc + + COMPLETION_ON=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + echo "COMPLETION_ON=${COMPLETION_ON}" + export SECRET_VALUE="{\"completion_status\":\"success\",\"completion_on\":\"$COMPLETION_ON\"}" + + echo "" + echo "Updating Secrets Manager" + echo "--------------------------------------------------------------------------------" + sm_update_secret $SECRET_NAME $SECRET_VALUE + + exit 0 + fi + exit 0 + + + restartPolicy: Never + + serviceAccountName: ibm-suite-dns-job + volumes: + - name: "ibm-suite-dns" + secret: + secretName: "ibm-suite-dns" + defaultMode: 420 + optional: false + + backoffLimit: 4 + +{{- end }} \ No newline at end of file diff --git a/instance-applications/010-ibm-sync-jobs/templates/PostDelete-aws-docdb-remove-user_Job.yaml b/instance-applications/010-ibm-sync-jobs/templates/PostDelete-aws-docdb-remove-user_Job.yaml new file mode 100644 index 000000000..f059c8ff0 --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/templates/PostDelete-aws-docdb-remove-user_Job.yaml @@ -0,0 +1,127 @@ +{{- if not (empty .Values.docdb) }} +--- + +# Deletes user "masinst_${MAS_INSTANCE_ID}" from docdb an deletes the acc/cluster/instance/mongo#password secret from AWS SM + +apiVersion: batch/v1 +kind: Job +metadata: + name: aws-docdb-remove-user + namespace: mas-{{ .Values.instance_id }}-syncres + annotations: + argocd.argoproj.io/hook: "PostDelete" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: "sync-job" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: aws-docdb-process-user + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + env: + + - name: ACCOUNT_ID + value: "{{ .Values.account_id }}" + - name: CLUSTER_ID + value: "{{ .Values.cluster_id }}" + - name: MAS_INSTANCE_ID + value: "{{ .Values.instance_id }}" 
+ + - name: DOCDB_MASTER_USERNAME + valueFrom: + secretKeyRef: + name: aws-docdb + key: docdb_master_username + - name: DOCDB_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: aws-docdb + key: docdb_master_password + - name: DOCDB_MASTER_INFO + valueFrom: + secretKeyRef: + name: aws-docdb + key: docdb_master_info + + - name: SM_AWS_REGION + valueFrom: + secretKeyRef: + name: aws + key: aws_default_region + - name: SM_AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws + key: aws_access_key_id + - name: SM_AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws + key: aws_secret_access_key + + command: + - /bin/sh + - -c + - | + + echo + echo "================================================================================" + echo "/opt/app-root/src/run-role.sh aws_documentdb_user" + echo "================================================================================" + + export MAS_CONFIG_DIR="/tmp/${MAS_INSTANCE_ID}/aws_documentdb_user" + export USER_ACTION="remove" + + # Grab one of the hosts/ports out of docdb master info + export DOCDB_HOST=$(echo "${DOCDB_MASTER_INFO}" | sed 's/\\n/\n/g' | sed 's/\\"//g' | /usr/bin/yq '.config.hosts[0].host') + export DOCDB_PORT=$(echo "${DOCDB_MASTER_INFO}" | sed 's/\\n/\n/g' | /usr/bin/yq '.config.hosts[0].port') + + echo "Params:" + echo " - MAS_INSTANCE_ID ................... ${MAS_INSTANCE_ID}" + echo " - MAS_CONFIG_DIR ................... ${MAS_CONFIG_DIR}" + echo " - DOCDB_HOST ................... ${DOCDB_HOST}" + echo " - DOCDB_PORT ................... ${DOCDB_PORT}" + echo " - DOCDB_MASTER_USERNAME ................... ${DOCDB_MASTER_USERNAME:0:2}" + echo " - DOCDB_MASTER_PASSWORD ................... ${DOCDB_MASTER_PASSWORD:0:2}" + echo " - DOCDB_INSTANCE_PASSWORD ................... ${DOCDB_INSTANCE_PASSWORD:0:2}" + echo " - USER_ACTION ................... ${USER_ACTION}" + echo + + mkdir -p ${MAS_CONFIG_DIR} + /opt/app-root/src/run-role.sh aws_documentdb_user || exit $? 
+ + echo + echo "================================================================================" + echo "Deleting Instance Mongo Secret" + echo "================================================================================" + + SECRETS_KEY_SEPERATOR="/" + SECRET_NAME_MONGO=${ACCOUNT_ID}${SECRETS_KEY_SEPERATOR}${CLUSTER_ID}${SECRETS_KEY_SEPERATOR}${MAS_INSTANCE_ID}${SECRETS_KEY_SEPERATOR}mongo + + echo "Params:" + echo " - ACCOUNT_ID ................... ${ACCOUNT_ID}" + echo " - CLUSTER_ID ................... ${CLUSTER_ID}" + echo " - MAS_INSTANCE_ID ................... ${MAS_INSTANCE_ID}" + echo " - SECRET_NAME_MONGO ................... ${SECRET_NAME_MONGO}" + echo " - SM_AWS_REGION ................... ${SM_AWS_REGION}" + echo " - SM_AWS_ACCESS_KEY_ID ................... ${SM_AWS_ACCESS_KEY_ID:0:2}" + echo " - SM_AWS_SECRET_ACCESS_KEY ................... ${SM_AWS_SECRET_ACCESS_KEY:0:2}" + echo + + source /mascli/functions/gitops_utils + sm_login + sm_delete_secret "${SECRET_NAME_MONGO}" || exit $? 
+ + restartPolicy: Never + serviceAccountName: aws-docdb-user-job + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/010-ibm-sync-jobs/values.yaml b/instance-applications/010-ibm-sync-jobs/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/instance-applications/010-ibm-sync-jobs/values.yaml @@ -0,0 +1 @@ +--- diff --git a/instance-applications/080-ibm-cp4d/Chart.yaml b/instance-applications/080-ibm-cp4d/Chart.yaml new file mode 100644 index 000000000..ce98b1436 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-cp4d +description: IBM Cloud Pak for Data (CP4D) +type: application +version: 1.0.0 diff --git a/instance-applications/080-ibm-cp4d/README.md b/instance-applications/080-ibm-cp4d/README.md new file mode 100644 index 000000000..16685f89f --- /dev/null +++ b/instance-applications/080-ibm-cp4d/README.md @@ -0,0 +1,3 @@ +IBM Cloud Pak for Data (CP4D) +=============================================================================== +Deploys and configures CP4D needed for `MAS Assist` and `MAS Predict`. Deploys the CP4D platform operator and its dependencies. 
diff --git a/instance-applications/080-ibm-cp4d/templates/00-ibm-cp4d_Namespace.yaml b/instance-applications/080-ibm-cp4d/templates/00-ibm-cp4d_Namespace.yaml new file mode 100644 index 000000000..f42650146 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/00-ibm-cp4d_Namespace.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "080" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ .Values.cpd_instance_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "080" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: "{{ .Values.cpd_cs_control_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "080" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/instance-applications/080-ibm-cp4d/templates/01-ibm-cp4d_CatalogSources.yaml b/instance-applications/080-ibm-cp4d/templates/01-ibm-cp4d_CatalogSources.yaml new file mode 100644 index 000000000..85d68e96c --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/01-ibm-cp4d_CatalogSources.yaml @@ -0,0 +1,46 @@ +{{- if not (empty .Values.cpd_product_version) }} +{{- if eq .Values.cpd_product_version "4.8.0" }} +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: CatalogSource +metadata: + name: cpd-platform + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "081" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 
}} +{{- end }} +spec: + displayName: ibm-cp-datacore-4.0.0+20231213.115030 + image: icr.io/cpopen/ibm-cpd-platform-operator-catalog@sha256:01bc0f165348d911d134a5bbd36bbe5f9c5a53877696b005d9a19b5d74623b4d + publisher: IBM + sourceType: grpc + updateStrategy: + registryPoll: + interval: 30m0s + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: CatalogSource +metadata: + name: opencloud-operators + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "081" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + displayName: ibm-cp-common-services-4.3.0 + image: icr.io/cpopen/ibm-common-service-catalog@sha256:5f2ada10db36dd609913f806fc44051186a1b719a0c1e04edfae5a6807b0eb26 + publisher: IBM + sourceType: grpc + updateStrategy: + registryPoll: + interval: 30m0s + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/instance-applications/080-ibm-cp4d/templates/01-ibm-entitlement_Secret.yaml b/instance-applications/080-ibm-cp4d/templates/01-ibm-entitlement_Secret.yaml new file mode 100644 index 000000000..8540a0d04 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/01-ibm-entitlement_Secret.yaml @@ -0,0 +1,34 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-entitlement-key + namespace: {{ .Values.cpd_operators_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "081" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: >- + {{ .Values.ibm_entitlement_key }} + + +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-entitlement-key + namespace: {{ .Values.cpd_instance_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "081" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: >- + 
{{ .Values.ibm_entitlement_key }} diff --git a/instance-applications/080-ibm-cp4d/templates/02-ibm-cp4d_rbac.yaml b/instance-applications/080-ibm-cp4d/templates/02-ibm-cp4d_rbac.yaml new file mode 100644 index 000000000..9933eb583 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/02-ibm-cp4d_rbac.yaml @@ -0,0 +1,1769 @@ +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: ibm-namespace-scope-operator + namespace: {{ .Values.cpd_operators_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "082" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "{{ .Values.cpd_admin_login_sa }}" + namespace: {{ .Values.cpd_instance_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "082" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: nss-managed-role-from-{{ .Values.cpd_operators_namespace }} + namespace: {{ .Values.cpd_operators_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "082" + labels: + managed-by: nss +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - deletecollection + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nss-managed-role-from-{{ .Values.cpd_operators_namespace }} + namespace: {{ .Values.cpd_instance_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "082" + labels: + managed-by: nss +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - deletecollection + apiGroups: + - '*' + resources: + - '*' + +--- +kind: Role 
+apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nss-runtime-managed-role-from-{{ .Values.cpd_operators_namespace }} + namespace: {{ .Values.cpd_instance_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "082" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - configmaps + - verbs: + - get + - patch + - update + apiGroups: + - '' + resources: + - configmaps/status + - verbs: + - create + - patch + apiGroups: + - '' + resources: + - events + - verbs: + - create + - delete + - get + - list + - patch + - watch + apiGroups: + - '' + resources: + - persistentvolumeclaims + - verbs: + - create + - delete + - get + - list + - patch + - watch + apiGroups: + - '' + resources: + - pods + - verbs: + - create + - delete + - get + - list + - patch + - watch + apiGroups: + - '' + resources: + - pods/exec + - verbs: + - get + apiGroups: + - '' + resources: + - pods/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - secrets + - verbs: + - get + - patch + - update + apiGroups: + - '' + resources: + - secrets/status + - verbs: + - create + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - serviceaccounts + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - services + - verbs: + - get + - list + - patch + - update + apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - verbs: + - get + - list + - patch + - update + apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + - verbs: + - get + - list + - update + apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + - verbs: + - create + - delete + - get + - list + - 
patch + - update + - watch + apiGroups: + - apps + resources: + - deployments + - verbs: + - create + - delete + - get + - list + - patch + - watch + apiGroups: + - batch + resources: + - jobs + - verbs: + - create + - get + - update + apiGroups: + - coordination.k8s.io + resources: + - leases + - verbs: + - create + - delete + - get + - list + - patch + - watch + apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - policy + resources: + - poddisruptionbudgets + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - backups + - verbs: + - get + - patch + - update + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - backups/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - clusters + - verbs: + - update + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - clusters/finalizers + - verbs: + - get + - patch + - update + - watch + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - clusters/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - poolers + - verbs: + - update + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - poolers/finalizers + - verbs: + - get + - patch + - update + - watch + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - poolers/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - scheduledbackups + - verbs: + - get + - patch + - update + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - scheduledbackups/status + - verbs: + - create + - get + - list + - patch + - update + - watch + 
apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - verbs: + - create + - get + - list + - patch + - update + - watch + apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - verbs: + - create + - get + - list + - patch + - watch + apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - secrets + - pods + - pods/exec + - pods/log + - configmaps + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - cpd.ibm.com + resources: + - ibmcpds + - ibmcpds/status + - ibmcpds/finalizers + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - zen.cpd.ibm.com + resources: + - zenservices + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operator.ibm.com + resources: + - operandrequests + - operandregistries + - operandconfigs + - verbs: + - patch + - update + - get + - list + - watch + apiGroups: + - operator.ibm.com + resources: + - commonservices + - verbs: + - get + - list + - watch + - create + - update + - patch + - delete + apiGroups: + - '' + resources: + - configmaps + - verbs: + - create + - patch + apiGroups: + - '' + resources: + - events + - verbs: + - create + - delete + - get + - list + - watch + apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + - verbs: + - get + - list + - patch + - update + - watch + apiGroups: + - apps + resources: + - deployments + - statefulsets + - daemonsets + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operator.ibm.com + resources: + - commonservices + - commonservices/finalizers + - 
commonservices/status + - operandconfigs + - operandregistries + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operators.coreos.com + resources: + - subscriptions + - clusterserviceversions + - verbs: + - get + apiGroups: + - '' + resources: + - namespaces + - verbs: + - get + - list + - delete + apiGroups: + - '' + resources: + - pods + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - secrets + - services + - verbs: + - create + - get + - patch + - update + apiGroups: + - '' + resources: + - events + - verbs: + - delete + apiGroups: + - certmanager.k8s.io + resources: + - certificates + - issuers + - verbs: + - delete + apiGroups: + - monitoring.operator.ibm.com + resources: + - exporters + - prometheusexts + - verbs: + - delete + apiGroups: + - operator.ibm.com + resources: + - operandrequests + - operandbindinfos + - cataloguis + - helmapis + - helmrepos + - verbs: + - delete + apiGroups: + - elasticstack.ibm.com + resources: + - elasticstacks + - verbs: + - get + apiGroups: + - '' + resources: + - secrets + resourceNames: + - ibm-common-service-operator-service-cert + - verbs: + - get + - list + - watch + - create + - delete + - update + - patch + apiGroups: + - route.openshift.io + resources: + - routes + - verbs: + - create + apiGroups: + - route.openshift.io + resources: + - routes/custom-host + - verbs: + - get + - list + - watch + apiGroups: + - route.openshift.io + resources: + - routes/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - pods + - services + - services/finalizers + - configmaps + - secrets + - serviceaccounts + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + 
apiGroups: + - apps + resources: + - deployments + - daemonsets + - verbs: + - get + - create + apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - verbs: + - update + apiGroups: + - apps + resources: + - deployments/finalizers + resourceNames: + - common-webui + - verbs: + - get + apiGroups: + - '' + resources: + - pods + - verbs: + - get + - list + apiGroups: + - apps + resources: + - replicasets + - deployments + - statefulsets + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - networking.k8s.io + resources: + - ingresses + - verbs: + - get + - list + apiGroups: + - operator.ibm.com + resources: + - commonservices + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operators.ibm.com + resources: + - commonwebuis + - commonwebuis/finalizers + - commonwebuis/status + - switcheritems + - switcheritems/finalizers + - switcheritems/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - foundation.ibm.com + resources: + - navconfigurations + - navconfigurations/finalizers + - navconfigurations/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - certmanager.k8s.io + resources: + - certificates + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - cert-manager.io + resources: + - certificates + - verbs: + - get + apiGroups: + - batch + resources: + - jobs + - verbs: + - delete + - get + - list + apiGroups: + - operator.ibm.com + resources: + - operandrequests + - verbs: + - get + - list + - watch + - create + - delete + - update + - patch + apiGroups: + - route.openshift.io + resources: + - routes + - verbs: + - create + apiGroups: + - route.openshift.io + resources: + - routes/custom-host + - verbs: + - get + - list + - watch + apiGroups: + - route.openshift.io + resources: + - routes/status + - verbs: + - 
create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - pods + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - oidc.security.ibm.com + resources: + - clients + - clients/finalizers + - clients/status + - verbs: + - get + - create + apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - verbs: + - update + apiGroups: + - apps + resources: + - deployments/finalizers + resourceNames: + - ibm-iam-operator + - verbs: + - get + apiGroups: + - '' + resources: + - pods + - verbs: + - get + apiGroups: + - apps + resources: + - replicasets + - deployments + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operator.ibm.com + resources: + - authentications + - verbs: + - update + - patch + apiGroups: + - operator.ibm.com + resources: + - authentications/finalizers + - verbs: + - get + - patch + - update + apiGroups: + - operator.ibm.com + resources: + - authentications/status + - verbs: + - get + - list + apiGroups: + - operator.ibm.com + resources: + - commonservices + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - cert-manager.io + resources: + - certificates + - certificaterequests + - orders + - challenges + - issuers + - verbs: + - delete + - get + - list + - watch + apiGroups: + - certmanager.k8s.io + resources: + - certificates + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - networking.k8s.io + resources: + - ingresses + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - batch + 
resources: + - jobs + - verbs: + - create + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - serviceaccounts + - verbs: + - create + apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - pods + - services + - services/finalizers + - serviceaccounts + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + - verbs: + - get + - create + apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - verbs: + - update + apiGroups: + - apps + resources: + - deployments/finalizers + resourceNames: + - ibm-mongodb-operator + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operator.ibm.com + resources: + - mongodbs + - mongodbs/finalizers + - mongodbs/status + - verbs: + - delete + - get + - list + - watch + apiGroups: + - certmanager.k8s.io + resources: + - certificates + - certificaterequests + - orders + - challenges + - issuers + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - cert-manager.io + resources: + - certificates + - certificaterequests + - orders + - challenges + - issuers + - verbs: + - delete + - get + - list + apiGroups: + - operator.ibm.com + resources: + - operandrequests + - verbs: + - create + - get + - delete + - watch + - update + - list + - patch + - deletecollection + apiGroups: + - '' + - batch + - extensions + - apps + - policy + - rbac.authorization.k8s.io + - autoscaling + - route.openshift.io + - authorization.openshift.io + - networking.k8s.io + - metrics.k8s.io + - project.openshift.io + - template.openshift.io + - autoscaling.k8s.io + resources: + - pods + - 
pods/log + - pods/eviction + - poddisruptionbudgets + - projects + - secrets + - jobs + - configmaps + - deployments + - deployments/scale + - daemonsets + - statefulsets + - statefulsets/scale + - replicasets + - replicationcontrollers + - services + - services/finalizers + - persistentvolumes + - persistentvolumeclaims + - cronjobs + - pods/exec + - pods/portforward + - serviceaccounts + - namespaces + - roles + - rolebindings + - horizontalpodautoscalers + - verticalpodautoscalers + - routes + - routes/custom-host + - ingresses + - endpoints + - cronjob + - networkpolicies + - events + - jobs/status + - pods/status + - resourcequotas + - resourcequotas/status + - processedtemplates + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - cpd.ibm.com + resources: + - cpdinstalls + - cpdinstalls/spec + - cpdinstalls/status + - verbs: + - create + - delete + - list + - watch + - get + - patch + - update + apiGroups: + - build.openshift.io + resources: + - buildconfigs + - buildconfigs/instantiate + - buildconfigs/instantiatebinary + - buildconfigs/webhooks + - buildlogs + - builds + - builds/clone + - builds/details + - builds/log + - verbs: + - create + - delete + - list + - watch + - get + - patch + - update + apiGroups: + - image.openshift.io + resources: + - imagestreams + - imagestreams/layers + - imagestreams/secrets + - imagestreams/status + - imagestreamimages + - imagestreamimports + - imagestreammappings + - imagestreamtags + - verbs: + - update + apiGroups: + - apps + resources: + - deployments/finalizers + resourceNames: + - cpd-zen-operator + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - zen.cpd.ibm.com + resources: + - zenservice + - zenservices + - zenservice/status + - zenservices/status + - zenextension + - zenextensions + - zenextension/status + - zenextensions/status + - verbs: + - get + - list + apiGroups: + - ibm.com + resources: + - paralleljob + - 
paralleljobs + - paralleljob/status + - paralleljobs/status + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operator.ibm.com + resources: + - commonservices + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - certmanager.k8s.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - cert-manager.io + resources: + - issuers + - issuers/status + - issuers/finalizers + - certificates + - certificates/status + - certificates/finalizers + - verbs: + - create + - get + - list + - patch + - update + - watch + - delete + apiGroups: + - oidc.security.ibm.com + resources: + - client + - clients + - verbs: + - create + - get + - list + - patch + - update + - watch + - delete + apiGroups: + - operator.ibm.com + resources: + - operandrequest + - operandrequests + - operandbindinfo + - operandbindinfos + - verbs: + - get + - list + - watch + apiGroups: + - operators.coreos.com + resources: + - clusterserviceversions + - verbs: + - get + - list + - watch + - update + - patch + apiGroups: + - operators.coreos.com + resources: + - operatorconditions + - operatorconditions/status + - verbs: + - get + - create + apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - verbs: + - get + - list + - watch + - update + - patch + - create + - delete + - deletecollection + apiGroups: + - ibm.com + resources: + - resourceplans + - resourcematches + - verbs: + - create + - get + - list + - patch + - update + - watch + - delete + apiGroups: + - networking.k8s.io + resources: + - networkpolicies + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - postgresql.k8s.enterprisedb.io + resources: + - clusters + - verbs: + - create + - delete + - get + - list + - patch + 
- update + - watch + apiGroups: + - operator.ibm.com + resources: + - operandconfigs + - operandconfigs/status + - operandconfigs/finalizers + - operandregistries + - operandregistries/status + - operandregistries/finalizers + - operandrequests + - operandrequests/status + - operandrequests/finalizers + - operandbindinfos + - operandbindinfos/status + - operandbindinfos/finalizers + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - '' + resources: + - configmaps + - secrets + - services + - namespaces + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - route.openshift.io + resources: + - routes + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - operators.coreos.com + resources: + - operatorgroups + - installplans + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + apiGroups: + - k8s.keycloak.org + resources: + - keycloaks + - verbs: + - get + - list + - patch + - update + - watch + apiGroups: + - packages.operators.coreos.com + resources: + - packagemanifests + - verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - deletecollection + apiGroups: + - '*' + resources: + - '*' + - verbs: + - get + apiGroups: + - '' + resources: + - secrets + resourceNames: + - postgresql-operator-controller-manager-1-18-7-service-cert + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nss-managed-role-from-{{ .Values.cpd_operators_namespace }} + namespace: {{ .Values.cpd_operators_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "083" + labels: + managed-by: nss +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: ibm-namespace-scope-operator + namespace: {{ .Values.cpd_operators_namespace }} +roleRef: + kind: Role + name: nss-managed-role-from-{{ 
.Values.cpd_operators_namespace }} + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cpd-instance-admin-apply-olm + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "082" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - apiGroups: + - operators.coreos.com + resources: + - operatorgroups + verbs: + - create + - get + - list + - patch + - update + - apiGroups: + - operators.coreos.com + resources: + - catalogsources + verbs: + - create + - get + - list + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cpd-instance-admin-read-catsrc + namespace: openshift-marketplace + annotations: + argocd.argoproj.io/sync-wave: "082" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - apiGroups: + - operators.coreos.com + resources: + - catalogsources + verbs: + - get + - list + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nss-managed-role-from-{{ .Values.cpd_operators_namespace }} + namespace: "{{ .Values.cpd_instance_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "083" + labels: + managed-by: nss +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: ibm-namespace-scope-operator + namespace: "{{ .Values.cpd_operators_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nss-managed-role-from-{{ .Values.cpd_operators_namespace }} + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "cpd-instance-op-admin-rbac-{{ .Values.instance_id }}" + annotations: + argocd.argoproj.io/sync-wave: "084" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - 
kind: ServiceAccount + name: "{{ .Values.cpd_admin_login_sa }}" + namespace: "{{ .Values.cpd_operators_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: admin + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "cpd-instance-admin-rbac-{{ .Values.instance_id }}" + annotations: + argocd.argoproj.io/sync-wave: "084" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: "{{ .Values.cpd_admin_login_sa }}" + namespace: "{{ .Values.cpd_instance_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: admin + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "cpd-instance-admin-apply-olm-rbac" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "084" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: "{{ .Values.cpd_admin_login_sa }}" + namespace: "{{ .Values.cpd_operators_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cpd-instance-admin-apply-olm + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "cpd-instance-admin-read-catsrc-rbac" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "084" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: "{{ .Values.cpd_admin_login_sa }}" + namespace: "{{ .Values.cpd_operators_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cpd-instance-admin-read-catsrc + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: presync-cpd-prereq-sa + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: 
+ argocd.argoproj.io/sync-wave: "082" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: presync-cpd-prereq-role-{{ .Values.instance_id }} + annotations: + argocd.argoproj.io/sync-wave: "082" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - list + - patch + - scale + apiGroups: + - '' + - apps + - zen.cpd.ibm.com + - cpd.ibm.com + - operator.ibm.com + resources: + - ibmcpds + - deployments + - zenservices + - secrets + - commonservices + - deployments/scale + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: presync-cpd-prereq-rb-{{ .Values.instance_id }} + annotations: + argocd.argoproj.io/sync-wave: "083" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: presync-cpd-prereq-sa + namespace: "{{ .Values.cpd_operators_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: presync-cpd-prereq-role-{{ .Values.instance_id }} diff --git a/instance-applications/080-ibm-cp4d/templates/03-ibm-cp4d_OperatorGroup.yaml b/instance-applications/080-ibm-cp4d/templates/03-ibm-cp4d_OperatorGroup.yaml new file mode 100644 index 000000000..648fb9245 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/03-ibm-cp4d_OperatorGroup.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: operators.coreos.com/v1alpha2 +kind: OperatorGroup +metadata: + name: common-service + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "083" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - "{{ .Values.cpd_operators_namespace }}" + + +--- +apiVersion: operators.coreos.com/v1 +kind: 
OperatorGroup +metadata: + name: ibm-licensing-operator-app + namespace: "{{ .Values.cpd_cs_control_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "083" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - "{{ .Values.cpd_cs_control_namespace }}" + upgradeStrategy: Default diff --git a/instance-applications/080-ibm-cp4d/templates/04-ibm-cp4d_prereqs_ops.yaml b/instance-applications/080-ibm-cp4d/templates/04-ibm-cp4d_prereqs_ops.yaml new file mode 100644 index 000000000..6d92f33b3 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/04-ibm-cp4d_prereqs_ops.yaml @@ -0,0 +1,192 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-namespace-scope-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + labels: + operators.coreos.com/ibm-namespace-scope-operator.{{ .Values.cpd_operators_namespace }}: '' + annotations: + argocd.argoproj.io/sync-wave: "084" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.namespace_scope_channel }}" + installPlanApproval: Automatic + name: ibm-namespace-scope-operator + source: opencloud-operators + sourceNamespace: "{{ .Values.cpd_operators_namespace }}" + + +--- +apiVersion: operator.ibm.com/v1 +kind: NamespaceScope +metadata: + name: cpd-operators + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "085" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + csvInjector: + enable: true + license: + accept: true + namespaceMembers: + - "{{ .Values.cpd_operators_namespace }}" + - "{{ .Values.cpd_instance_namespace }}" + restartLabels: + intent: projected + + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription 
+metadata: + name: ibm-licensing-operator-app + namespace: "{{ .Values.cpd_cs_control_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "084" + labels: + operators.coreos.com/ibm-licensing-operator-app.{{ .Values.cpd_cs_control_namespace }}: '' +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.cpd_ibm_licensing_channel }}" + installPlanApproval: Automatic + name: ibm-licensing-operator-app + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace + + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: IBMLicensing +metadata: + name: instance + annotations: + argocd.argoproj.io/sync-wave: "085" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + datasource: datacollector + httpsEnable: true + license: + accept: true + resources: {} + usageContainer: + resources: {} + version: "{{ .Values.cpd_ibm_licensing_version }}" + + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: common-service-maps-{{ .Values.instance_id }} + namespace: kube-public + labels: + operator.ibm.com/managedByCsOperator: 'true' +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "084" +data: + common-service-maps.yaml: | + controlNamespace: {{ .Values.cpd_cs_control_namespace }} + namespaceMapping: + - map-to-common-service-namespace: {{ .Values.cpd_instance_namespace }} + requested-from-namespace: + - {{ .Values.cpd_instance_namespace }} + - {{ .Values.cpd_operators_namespace }} + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-common-service-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "084" + labels: + operators.coreos.com/ibm-common-service-operator.{{ 
.Values.cpd_operators_namespace }}: '' +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.cpfs_channel }}" + installPlanApproval: Automatic + name: ibm-common-service-operator + source: opencloud-operators + sourceNamespace: "{{ .Values.cpd_operators_namespace }}" + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "patch-common-service-job" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "085" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CPD_OPERATORS_NAMESPACE + value: {{ .Values.cpd_operators_namespace }} + - name: CPFS_SIZE + value: {{ .Values.cpfs_size }} + command: + - /bin/sh + - -c + - | + + set -e + + echo + echo "================================================================================" + echo "Wait for Common Service CR to be available to patch" + echo "================================================================================" + + while true; do + CS_LOOKUP=$(oc get CommonService common-service -n ${CPD_OPERATORS_NAMESPACE} --ignore-not-found) + if [[ ! 
-z $CS_LOOKUP ]]; then + oc patch --type='merge' CommonService common-service -n ${CPD_OPERATORS_NAMESPACE} -p "{\"spec\":{ \"size\":\"$CPFS_SIZE\"}}" + break + fi + done + + restartPolicy: Never + serviceAccountName: presync-cpd-prereq-sa + backoffLimit: 4 diff --git a/instance-applications/080-ibm-cp4d/templates/06-ibm-cp4d_Subscription.yaml b/instance-applications/080-ibm-cp4d/templates/06-ibm-cp4d_Subscription.yaml new file mode 100644 index 000000000..c38296be4 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/06-ibm-cp4d_Subscription.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: cpd-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "086" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.cpd_platform_channel }}" + installPlanApproval: Automatic + name: cpd-platform-operator + source: cpd-platform + sourceNamespace: "{{ .Values.cpd_operators_namespace }}" diff --git a/instance-applications/080-ibm-cp4d/templates/07-ibm-cp4d_Ibmcpd.yaml b/instance-applications/080-ibm-cp4d/templates/07-ibm-cp4d_Ibmcpd.yaml new file mode 100644 index 000000000..2fc6e57ed --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/07-ibm-cp4d_Ibmcpd.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: cpd.ibm.com/v1 +kind: Ibmcpd +metadata: + name: "{{ .Values.cpd_platform_cr_name }}" + namespace: "{{ .Values.cpd_instance_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "087" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + imagePullSecret: ibm_entitlement_key + version: "{{ .Values.cpd_product_version }}" + license: + accept: true + license: Standard + servicesNamespace: {{ .Values.cpd_operators_namespace }} + iamIntegration: {{ 
.Values.cpd_iam_integration }} + fileStorageClass: "{{ .Values.cpd_primary_storage_class }}" + blockStorageClass: "{{ .Values.cpd_metadata_storage_class }}" diff --git a/instance-applications/080-ibm-cp4d/templates/08-ibm-cp4d-postsync-verify.yaml b/instance-applications/080-ibm-cp4d/templates/08-ibm-cp4d-postsync-verify.yaml new file mode 100644 index 000000000..b28b4afff --- /dev/null +++ b/instance-applications/080-ibm-cp4d/templates/08-ibm-cp4d-postsync-verify.yaml @@ -0,0 +1,190 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "cpd-postsync-verify-job" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "089" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CPD_INSTANCE_NAMESPACE + value: {{ .Values.cpd_instance_namespace }} + - name: CPD_OPERATORS_NAMESPACE + value: {{ .Values.cpd_operators_namespace }} + - name: CPD_SCALE_CONFIG + value: {{ .Values.cpd_scale_config }} + command: + - /bin/sh + - -c + - | + + set -e + echo + echo "================================================================================" + echo "Wait for ZenService Cr and patch it if needed" + echo "================================================================================" + echo + + echo + echo "Wait for ibm-zen-operator to be ready" + + wait_period=0 + while true; do + wait_period=$(($wait_period+60)) + if [ $wait_period -gt 1200 ]; then + echo "ZenService operator is not ready after 20 minutes of waiting. exiting..." 
+ exit 1 + else + sleep 60 + fi + + ZEN_OPERATOR_LOOKUP=$(oc get deployment ibm-zen-operator -n $CPD_OPERATORS_NAMESPACE --ignore-not-found -ojsonpath='{.status.availableReplicas}') + if [[ ! -z $ZEN_OPERATOR_LOOKUP ]]; then + echo "ZenService Operator has started" + break + fi + done + + echo "Patching ZenService lite-cr if not already patched" + while true; do + ZS_LOOKUP=$(oc get ZenService lite-cr -n ${CPD_INSTANCE_NAMESPACE} --ignore-not-found -ojsonpath='{.spec}') + if [[ ! -z $ZS_LOOKUP ]]; then + echo "ZenService spec has been fetched" + IS_PATCHED=$(oc get ZenService lite-cr -n ${CPD_INSTANCE_NAMESPACE} --ignore-not-found -ojsonpath='{.spec.ZenCoreMetaDb}') + if [[ -z $IS_PATCHED ]]; then + echo "patching zenservice" + oc patch --type='merge' ZenService lite-cr -n ${CPD_INSTANCE_NAMESPACE} -p "{\"spec\":{ \"scaleConfig\":\"$CPD_SCALE_CONFIG\",\"ZenCoreMetaDb\":{ \"name\":\"zen-metastoredb\",\"kind\":\"StatefulSet\",\"container\":\"zen-metastoredb\",\"replicas\":\"3\",\"resources\":{ \"limits\":{ \"cpu\":\"2\",\"memory\":\"4Gi\"},\"requests\":{ \"cpu\":\"200m\",\"memory\":\"1024Mi\"}}}}}" + echo "scaling zen operator down and up to force reconcile when the pod is restarted" + oc scale deployment ibm-zen-operator -n ${CPD_OPERATORS_NAMESPACE} --replicas=0 + oc scale deployment ibm-zen-operator -n ${CPD_OPERATORS_NAMESPACE} --replicas=1 + fi + break + fi + done + + echo + echo "Waiting for ibm-zen-operator to be ready again..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+60)) + if [ $wait_period -gt 1200 ]; then + echo "ZenService operator is not ready after 20 minutes of waiting. exiting..." + exit 1 + else + sleep 60 + fi + + ZEN_OPERATOR_LOOKUP=$(oc get deployment ibm-zen-operator -n $CPD_OPERATORS_NAMESPACE --ignore-not-found -ojsonpath='{.status.availableReplicas}') + if [[ ! -z $ZEN_OPERATOR_LOOKUP ]]; then + echo "ZenService Operator is ready again" + break + fi + done + + echo + echo "Waiting for zenStatus to be 'Completed'..." 
+ + wait_period=0 + while true; do + wait_period=$(($wait_period+120)) + if [ $wait_period -gt 7200 ]; then + echo "zenStatus is not complete after 2 hours of waiting. exiting..." + exit 1 + else + sleep 120 + fi + + ZENSERVICE_LOOKUP=$(oc get ZenService lite-cr -n ${CPD_INSTANCE_NAMESPACE} -o jsonpath='{.status.zenStatus}') + if [[ ! -z ${ZENSERVICE_LOOKUP} && ${ZENSERVICE_LOOKUP} == *"Completed"* ]]; then + echo "ZenService is complete" + break + fi + done + + echo + echo "Waiting for controlPlaneStatus to complete..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+120)) + if [ $wait_period -gt 7200 ]; then + echo "ibmcpd controlPlaneStatus is not complete after 2 hours of waiting. exiting..." + exit 1 + else + sleep 120 + fi + + IBMCPD_LOOKUP=$(oc get Ibmcpd ibmcpd-cr -n ${CPD_INSTANCE_NAMESPACE} -o jsonpath='{.status.controlPlaneStatus}') + if [[ ! -z ${IBMCPD_LOOKUP} && ${IBMCPD_LOOKUP} == *"Completed"* ]]; then + echo "ibmcpd controlPlaneStatus is complete" + break + fi + done + + echo + echo "Lookup Cloud Pak for Data admin username" + USERNAME=$(oc get secret ibm-iam-bindinfo-platform-auth-idp-credentials -n ${CPD_INSTANCE_NAMESPACE} --ignore-not-found -o json | jq -r '.data.admin_username' | base64 -d) + + if [[ -z ${USERNAME} ]]; then + export CPD_ADMIN_USERNAME="admin" + export SECRET_NAME="admin-user-details" + export ADMIN_PASSWORD_PROPERTY="initial_admin_password" + + else + export CPD_ADMIN_USERNAME="cpadmin" + export SECRET_NAME="ibm-iam-bindinfo-platform-auth-idp-credentials" + export ADMIN_PASSWORD_PROPERTY="admin_password" + fi + + + echo + echo "Lookup Cloud Pak for Data admin password" + CPD_ADMIN_PASSWORD=$(oc get secret $SECRET_NAME -n ${CPD_INSTANCE_NAMESPACE} -o json | jq -r ".data.${ADMIN_PASSWORD_PROPERTY}" | base64 -d) + + echo + echo "Lookup Cloud Pak for Data admin url" + URL=$(oc get ZenService lite-cr -o jsonpath="{.status.url}{'\n'}" -n ${CPD_INSTANCE_NAMESPACE}) + + if [[ -z ${URL} || -z ${CPD_ADMIN_USERNAME} || 
-z ${CPD_ADMIN_PASSWORD} ]]; then + echo "CPD admin username/password/url details are missing, please check your Cloud Pak For Data instance health!" + exit 1 + fi + + CPD_ADMIN_URL="https://${URL}" + + echo + echo + echo "================================================================================" + echo "Debug Cloud Pak for Data details" + echo "================================================================================" + echo "- CP4D Dashboard ......................... ${CPD_ADMIN_URL}" + echo "- CP4D Admin Username .................... ${CPD_ADMIN_USERNAME}" + echo "- CP4D Admin Password .................... ${CPD_ADMIN_PASSWORD:0:2}" + + restartPolicy: Never + serviceAccountName: presync-cpd-prereq-sa + backoffLimit: 4 diff --git a/instance-applications/080-ibm-cp4d/values.yaml b/instance-applications/080-ibm-cp4d/values.yaml new file mode 100644 index 000000000..8d1d33f61 --- /dev/null +++ b/instance-applications/080-ibm-cp4d/values.yaml @@ -0,0 +1,18 @@ +--- +cpd_operators_namespace: "ibm-cpd-operators" +cpd_instance_namespace: "ibm-cpd" +cpd_cs_control_namespace: "cs-control" +ibm_entitlement_key: "" +cpd_admin_login_sa: "mas-cpd-cluster-admin-sa" +namespace_scope_channel: "" +cpd_ibm_licensing_channel: "" +cpd_ibm_licensing_version: "" +cpfs_channel: "" +cpfs_size: "small" +cpd_scale_config: "medium" +cpd_platform_channel: "" +cpd_platform_cr_name: "ibmcpd-cr" +cpd_product_version: "" +cpd_iam_integration: false +cpd_primary_storage_class: "" +cpd_metadata_storage_class: "" diff --git a/instance-applications/090-ibm-wsl/Chart.yaml b/instance-applications/090-ibm-wsl/Chart.yaml new file mode 100644 index 000000000..a49784673 --- /dev/null +++ b/instance-applications/090-ibm-wsl/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-wsl +description: IBM Watson Studio Local (WSL) +type: application +version: 1.0.0 diff --git a/instance-applications/090-ibm-wsl/README.md b/instance-applications/090-ibm-wsl/README.md new file mode 100644 index 
000000000..7bcf9f700 --- /dev/null +++ b/instance-applications/090-ibm-wsl/README.md @@ -0,0 +1,3 @@ +IBM Watson Studio Local (WSL) +=============================================================================== +Deploys and configures the CP4D Service, Watson Studio Local (WSL) needed for `MAS Predict`. Deploys WSL operator and its dependencies. diff --git a/instance-applications/090-ibm-wsl/templates/00-ibm-wsl-precheck.yaml b/instance-applications/090-ibm-wsl/templates/00-ibm-wsl-precheck.yaml new file mode 100644 index 000000000..354637a3d --- /dev/null +++ b/instance-applications/090-ibm-wsl/templates/00-ibm-wsl-precheck.yaml @@ -0,0 +1,151 @@ +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: wsl-precheck-sa + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "090" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: wsl-precheck-role-{{ .Values.instance_id }} + annotations: + argocd.argoproj.io/sync-wave: "090" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - list + - patch + - update + apiGroups: + - '' + - zen.cpd.ibm.com + - cpd.ibm.com + - operator.ibm.com + - apiextensions.k8s.io + - ccs.cpd.ibm.com + - elasticsearch.opencontent.ibm.com + - ws.cpd.ibm.com + resources: + - ibmcpds + - zenservices + - secrets + - commonservices + - customresourcedefinitions + - ccs + - elasticsearchclusters + - ws + - serviceaccounts + - verbs: + - get + - list + - scale + - patch + apiGroups: + - apps + resources: + - deployments + - deployments/scale + - verbs: + - delete + - list + apiGroups: + - batch + - apps + resources: + - jobs + - statefulsets + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: wsl-precheck-rb-{{ .Values.instance_id }} + 
annotations: + argocd.argoproj.io/sync-wave: "091" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: wsl-precheck-sa + namespace: "{{ .Values.cpd_operators_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: wsl-precheck-role-{{ .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "cpd-wsl-precheck" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "092" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.0.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CPD_INSTANCE_NAMESPACE + value: {{ .Values.cpd_instance_namespace }} + command: + - /bin/sh + - -c + - | + + set -e + echo + echo "================================================================================" + echo "Check that ibmcpd-cr is ready" + echo "================================================================================" + + echo + echo "Checking if controlPlaneStatus is complete..." + + IBMCPD_LOOKUP=$(oc get Ibmcpd ibmcpd-cr -n ${CPD_INSTANCE_NAMESPACE} -o jsonpath='{.status.controlPlaneStatus}') + if [[ ! -z ${IBMCPD_LOOKUP} && ${IBMCPD_LOOKUP} == *"Completed"* ]]; then + echo "ibmcpd controlPlaneStatus is complete and ready for services install" + else + echo "Failed! IBM Cloud Pak for Data must be ready and successfully installed before installing any CloudPak for Data service." + echo "Current IBM Cloud Pak for Data installation status is ${IBMCPD_LOOKUP}" + echo "exiting..." 
+ exit 1 + fi + + restartPolicy: Never + serviceAccountName: wsl-precheck-sa + backoffLimit: 4 diff --git a/instance-applications/090-ibm-wsl/templates/01-ibm-wsl-dependencies.yml b/instance-applications/090-ibm-wsl/templates/01-ibm-wsl-dependencies.yml new file mode 100644 index 000000000..f8adf0689 --- /dev/null +++ b/instance-applications/090-ibm-wsl/templates/01-ibm-wsl-dependencies.yml @@ -0,0 +1,198 @@ +# Custom Operand Registries +# ----------------------------------------------------------------------------- +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRegistry +metadata: + name: ibm-cpd-ccs-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + operators: + - channel: "{{ .Values.ccs_channel }}" + installPlanApproval: Automatic + name: ibm-cpd-ccs-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + packageName: ibm-cpd-ccs + scope: public + sourceNamespace: openshift-marketplace + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRegistry +metadata: + name: ibm-cpd-datarefinery-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + operators: + - channel: "{{ .Values.datarefinery_channel }}" + installPlanApproval: Automatic + name: ibm-cpd-datarefinery-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + packageName: ibm-cpd-datarefinery + scope: public + sourceNamespace: openshift-marketplace + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRegistry +metadata: + name: ibm-cpd-ws-runtimes-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ 
.Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + operators: + - channel: "{{ .Values.ws_runtimes_channel }}" + installPlanApproval: Automatic + name: ibm-cpd-ws-runtimes-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + packageName: ibm-cpd-ws-runtimes + scope: public + sourceNamespace: openshift-marketplace + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRegistry +metadata: + name: opencontent-rabbitmq-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + operators: + - channel: "{{ .Values.opencontent_rabbitmq_channel }}" + installPlanApproval: Automatic + name: rabbitmq-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + packageName: ibm-rabbitmq-operator + scope: public + sourceNamespace: openshift-marketplace + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRegistry +metadata: + name: opencontent-elasticsearch-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + operators: + - channel: "{{ .Values.opencontent_elasticsearch_channel }}" + installPlanApproval: Automatic + name: elasticsearch-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + packageName: ibm-elasticsearch-operator + scope: public + sourceNamespace: openshift-marketplace + +# Custom Operand Configs +# ----------------------------------------------------------------------------- +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandConfig +metadata: + name: ibm-cpd-ccs-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} 
+{{- end }} +spec: + services: + - name: ibm-cpd-ccs-operator + spec: {} + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandConfig +metadata: + name: ibm-cpd-datarefinery-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + services: + - name: ibm-cpd-datarefinery-operator + spec: {} + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandConfig +metadata: + name: ibm-cpd-ws-runtimes-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + services: + - name: ibm-cpd-ws-runtimes-operator + spec: {} + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandConfig +metadata: + name: opencontent-rabbitmq-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + services: + - name: rabbitmq-operator + spec: {} + +--- +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandConfig +metadata: + name: opencontent-elasticsearch-registry + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + services: + - name: elasticsearch-operator + spec: {} diff --git a/instance-applications/090-ibm-wsl/templates/01-ibm-wsl_ServiceAccount.yaml b/instance-applications/090-ibm-wsl/templates/01-ibm-wsl_ServiceAccount.yaml new file mode 100644 index 000000000..5f964e8e9 --- /dev/null +++ b/instance-applications/090-ibm-wsl/templates/01-ibm-wsl_ServiceAccount.yaml @@ -0,0 +1,87 @@ +--- +apiVersion: batch/v1 +kind: 
Job +metadata: + name: "cpd-wsl-sa-patch-job" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "093" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.0.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CPD_INSTANCE_NAMESPACE + value: {{ .Values.cpd_instance_namespace }} + command: + - /bin/sh + - -c + - | + + set -e + echo + echo "================================================================================" + echo "Patch ServiceAccounts with ibm-entitlement-key image pull secret" + echo "================================================================================" + echo + + echo "patching default sa..." + oc secrets link -n ${CPD_INSTANCE_NAMESPACE} --for=pull default ibm-entitlement-key + + echo "patching zen-admin-sa sa..." + oc secrets link -n ${CPD_INSTANCE_NAMESPACE} --for=pull zen-admin-sa ibm-entitlement-key + + echo "patching zen-editor-sa sa..." + oc secrets link -n ${CPD_INSTANCE_NAMESPACE} --for=pull zen-editor-sa ibm-entitlement-key + + echo "patching zen-norbac-sa sa..." + oc secrets link -n ${CPD_INSTANCE_NAMESPACE} --for=pull zen-norbac-sa ibm-entitlement-key + + echo "patching zen-runtime-sa sa..." + oc secrets link -n ${CPD_INSTANCE_NAMESPACE} --for=pull zen-runtime-sa ibm-entitlement-key + + echo "patching zen-viewer-sa sa..." 
+ oc secrets link -n ${CPD_INSTANCE_NAMESPACE} --for=pull zen-viewer-sa ibm-entitlement-key + + RESOURCE_NAME=$(oc get serviceaccount runtime-assemblies-operator -n ${CPD_INSTANCE_NAMESPACE} -o=jsonpath="{.metadata.name}" --ignore-not-found) + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "creating runtime-assemblies-operator sa..." + oc create serviceaccount runtime-assemblies-operator -n ${CPD_INSTANCE_NAMESPACE} + fi + + echo "patching runtime-assemblies-operator sa..." + oc patch serviceaccount runtime-assemblies-operator -n ${CPD_INSTANCE_NAMESPACE} -p '{"imagePullSecrets":[{"name":"ibm-entitlement-key"}]}' + + RESOURCE_NAME=$(oc get serviceaccount runtime-manager-api -n ${CPD_INSTANCE_NAMESPACE} -o=jsonpath="{.metadata.name}" --ignore-not-found) + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "creating runtime-manager-api sa..." + oc create serviceaccount runtime-manager-api -n ${CPD_INSTANCE_NAMESPACE} + fi + + echo "patching runtime-manager-api sa..." + oc patch serviceaccount runtime-manager-api -n ${CPD_INSTANCE_NAMESPACE} -p '{"imagePullSecrets":[{"name":"ibm-entitlement-key"}]}' + + echo "Done" + + restartPolicy: Never + serviceAccountName: wsl-precheck-sa + backoffLimit: 4 diff --git a/instance-applications/090-ibm-wsl/templates/02-ibm-wsl_Subscription.yaml b/instance-applications/090-ibm-wsl/templates/02-ibm-wsl_Subscription.yaml new file mode 100644 index 000000000..6dabf9a63 --- /dev/null +++ b/instance-applications/090-ibm-wsl/templates/02-ibm-wsl_Subscription.yaml @@ -0,0 +1,94 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: cpd-wsl-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "094" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + name: ibm-cpd-wsl + channel: "{{ .Values.wsl_channel }}" + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace + installPlanApproval: 
Automatic + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-cpd-ccs-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "094" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.ccs_channel }}" + installPlanApproval: Automatic + name: ibm-cpd-ccs + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-cpd-datarefinery-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "094" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.datarefinery_channel }}" + installPlanApproval: Automatic + name: ibm-cpd-datarefinery + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-cpd-ws-runtimes-operator + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "094" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.ws_runtimes_channel }}" + installPlanApproval: Automatic + name: ibm-cpd-ws-runtimes + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace + +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-elasticsearch-operator-v1.1-ibm-operator-catalog-openshift-marketplace + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "094" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.opencontent_elasticsearch_channel }}" + installPlanApproval: 
Automatic + name: ibm-elasticsearch-operator + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace diff --git a/instance-applications/090-ibm-wsl/templates/03-ibm-wsl-cr.yaml b/instance-applications/090-ibm-wsl/templates/03-ibm-wsl-cr.yaml new file mode 100644 index 000000000..1f555a078 --- /dev/null +++ b/instance-applications/090-ibm-wsl/templates/03-ibm-wsl-cr.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: ws.cpd.ibm.com/v1beta1 +kind: WS +metadata: + name: "ws-cr" + namespace: "{{ .Values.cpd_instance_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "095" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + imagePullSecret: ibm-entitlement-key + ignoreForMaintenance: false + license: + accept: true + license: Standard + scaleConfig: "{{ .Values.cpd_service_scale_config }}" + fileStorageClass: "{{ .Values.cpd_service_storage_class }}" + blockStorageClass: "{{ .Values.cpd_service_block_storage_class }}" + version: "{{ .Values.wsl_version }}" + ccs_operand_version: "{{ .Values.ccs_version }}" + datarefinery_operand_version: "{{ .Values.datarefinery_version }}" + wsrt_operand_version: "{{ .Values.ws_runtimes_version }}" diff --git a/instance-applications/090-ibm-wsl/templates/04-ibm-wsl-post-verify.yaml b/instance-applications/090-ibm-wsl/templates/04-ibm-wsl-post-verify.yaml new file mode 100644 index 000000000..77b952c70 --- /dev/null +++ b/instance-applications/090-ibm-wsl/templates/04-ibm-wsl-post-verify.yaml @@ -0,0 +1,450 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "cpd-wsl-post-verify-job-part-a" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "096" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ 
.Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.0.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CPD_INSTANCE_NAMESPACE + value: {{ .Values.cpd_instance_namespace }} + - name: CPD_OPERATORS_NAMESPACE + value: {{ .Values.cpd_operators_namespace }} + - name: CPD_SERVICE_STORAGE_CLASS + value: {{ .Values.cpd_service_storage_class }} + - name: CPD_SERVICE_BLOCK_STORAGE_CLASS + value: {{ .Values.cpd_service_block_storage_class }} + command: + - /bin/sh + - -c + - | + + set -e + echo + echo "================================================================================" + echo "Part A - Waiting for WSL and its dependencies to be complete" + echo "Wait for CCS Cr to be ready and patch if needed" + echo "================================================================================" + echo + + echo + echo "Waiting for ccs-cr to be ready..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+60)) + if [ $wait_period -gt 1800 ]; then + echo "ccs-cr is not ready after 30 minutes of waiting. exiting..." + exit 1 + else + sleep 60 + fi + + CCS_CR_LOOKUP=$(oc get CCS ccs-cr -n $CPD_INSTANCE_NAMESPACE --ignore-not-found) + if [[ ! -z $CCS_CR_LOOKUP ]]; then + echo "CCS Cr is available" + break + fi + done + + echo "Patching CCS ccs-cr if not already patched" + CCS_COUCHDB_RES=$(oc get CCS ccs-cr -n ${CPD_INSTANCE_NAMESPACE} -ojsonpath='{.spec.couchdb_resources}') + CCS_BLOCKSTORAGE=$(oc get CCS ccs-cr -n ${CPD_INSTANCE_NAMESPACE} -ojsonpath='{.spec.blockStorageClass}') + CCS_IMAGEPULLSECRET=$(oc get CCS ccs-cr -n ${CPD_INSTANCE_NAMESPACE} -ojsonpath='{.spec.imagePullSecret}') + + if [[ -z $CCS_COUCHDB_RES || -z $CCS_BLOCKSTORAGE || -z $CCS_IMAGEPULLSECRET ]]; then + echo "CCS CR needs to be patched" + echo "patching CCS Cr..." 
+ oc patch --type='merge' CCS ccs-cr -n ${CPD_INSTANCE_NAMESPACE} -p "{\"spec\":{ \"imagePullSecret\":\"ibm-entitlement-key\",\"blockStorageClass\":\"$CPD_SERVICE_BLOCK_STORAGE_CLASS\",\"fileStorageClass\":\"$CPD_SERVICE_STORAGE_CLASS\",\"couchdb_resources\":{ \"limits\":{ \"cpu\":\"16\",\"memory\":\"16Gi\"},\"requests\":{ \"cpu\":\"3\",\"memory\":\"256Mi\"},\"couchdb_search_resources\":{ \"limits\":{ \"cpu\":\"4\",\"memory\":\"6Gi\"},\"requests\":{ \"cpu\":\"250m\",\"memory\":\"256Mi\"}}}}}" + echo "scaling CCS operator down and up to force reconcile when the pod is restarted" + oc scale deployment ibm-cpd-ccs-operator -n ${CPD_OPERATORS_NAMESPACE} --replicas=0 + oc scale deployment ibm-cpd-ccs-operator -n ${CPD_OPERATORS_NAMESPACE} --replicas=1 + fi + + echo + echo "Waiting for ibm-cpd-ccs-operator to be ready again..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+60)) + if [ $wait_period -gt 1200 ]; then + echo "CCS operator is not ready after 20 minutes of waiting. exiting..." + exit 1 + else + sleep 60 + fi + + CCS_OPERATOR_LOOKUP=$(oc get deployment ibm-cpd-ccs-operator -n $CPD_OPERATORS_NAMESPACE --ignore-not-found -ojsonpath='{.status.availableReplicas}') + if [[ ! -z $CCS_OPERATOR_LOOKUP ]]; then + echo "CCS Operator is ready again" + break + fi + done + + echo + echo "================================================================================" + echo "Wait for Elastic Search to be ready and patch if needed before checking ccsStatus" + echo "================================================================================" + echo + + echo + echo "Waiting for elasticsearch-master to be ready..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+60)) + if [ $wait_period -gt 1800 ]; then + echo "elasticsearch-master is not ready after 30 minutes of waiting. Exiting..." 
+ exit 1 + else + sleep 60 + fi + + ELASTICSEARCH_CR_LOOKUP=$(oc get ElasticsearchCluster elasticsearch-master -n $CPD_INSTANCE_NAMESPACE --ignore-not-found) + if [[ ! -z $ELASTICSEARCH_CR_LOOKUP ]]; then + echo "Elasticsearch Cr is available" + break + fi + done + + echo "Patching ElasticsearchCluster elasticsearch-master if not already patched" + + ELASTICSEARCH_IMAGEPULLSECRET=$(oc get ElasticsearchCluster elasticsearch-master -n ${CPD_INSTANCE_NAMESPACE} -ojsonpath='{.spec.imagePullSecret}') + + if [[ -z $ELASTICSEARCH_IMAGEPULLSECRET ]]; then + echo "elasticsearch-master needs to be patched" + echo "patching elasticsearch-master and forcing reconcile..." + oc scale deployment ibm-elasticsearch-operator-ibm-es-controller-manager -n ${CPD_OPERATORS_NAMESPACE} --replicas=0 + oc delete Job elasticsearch-master-ibm-elasticsearch-create-snapshot-repo-job -n ${CPD_INSTANCE_NAMESPACE} --ignore-not-found + oc delete StatefulSet -n ${CPD_INSTANCE_NAMESPACE} --selector="app.kubernetes.io/instance=elasticsearch-master" --ignore-not-found + oc patch --type='merge' ElasticsearchCluster elasticsearch-master -n ${CPD_INSTANCE_NAMESPACE} -p "{\"spec\":{ \"imagePullSecret\":\"ibm-entitlement-key\"}}" + oc scale deployment ibm-elasticsearch-operator-ibm-es-controller-manager -n ${CPD_OPERATORS_NAMESPACE} --replicas=1 + fi + + echo + echo "Waiting for ibm-elasticsearch-operator to be ready again..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+60)) + if [ $wait_period -gt 1200 ]; then + echo "Elasticsearch operator is not ready after 20 minutes of waiting. exiting..." + exit 1 + else + sleep 60 + fi + + ELASTICSEARCH_OPERATOR_LOOKUP=$(oc get deployment ibm-elasticsearch-operator-ibm-es-controller-manager -n $CPD_OPERATORS_NAMESPACE --ignore-not-found -ojsonpath='{.status.availableReplicas}') + if [[ ! 
-z $ELASTICSEARCH_OPERATOR_LOOKUP ]]; then + echo "Elasticsearch Operator is ready again" + break + fi + done + + echo + echo "================================================================================" + echo "Back to CCS" + echo "================================================================================" + echo + echo "Waiting for ccsStatus to be 'Completed'..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+300)) + if [ $wait_period -gt 15000 ]; then + echo "ccsStatus is not complete after 4 hours and 10 minutes of waiting. Exiting..." + exit 1 + else + sleep 300 + fi + + CCSSTATUS_LOOKUP=$(oc get CCS ccs-cr -n ${CPD_INSTANCE_NAMESPACE} -o jsonpath='{.status.ccsStatus}') + if [[ ! -z ${CCSSTATUS_LOOKUP} && ${CCSSTATUS_LOOKUP} == *"Completed"* ]]; then + echo "CCS is complete" + break + fi + done + + + echo + echo "================================================================================" + echo "Wait for WSL to be ready and patch if needed" + echo "================================================================================" + echo + + echo "Waiting for notebookruntimes.ws.cpd.ibm.com CRD to be available..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+5)) + if [ $wait_period -gt 600 ]; then + echo "notebookruntimes.ws.cpd.ibm.com CRD is not available after 10 minutes of waiting. Exiting..." + exit 1 + else + sleep 5 + fi + + WSR_LOOKUP=$(oc get CustomResourceDefinition notebookruntimes.ws.cpd.ibm.com --ignore-not-found) + if [[ ! -z ${WSR_LOOKUP} ]]; then + echo "notebookruntimes.ws.cpd.ibm.com is available" + break + fi + done + + echo + echo "================================================================================" + echo "to be continued in job part B..." 
+ echo + + restartPolicy: Never + serviceAccountName: wsl-precheck-sa + backoffLimit: 4 + + +--- +apiVersion: ws.cpd.ibm.com/v1beta1 +kind: NotebookRuntime +metadata: + name: "ibm-cpd-ws-runtime-py39" + namespace: "{{ .Values.cpd_instance_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "097" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + kind: "ibm-cpd-ws-runtime-py39" + license: + accept: true + version: "{{ .Values.ws_runtimes_version }}" + +--- +kind: Secret +apiVersion: v1 +metadata: + name: aws + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "097" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +stringData: + aws_access_key_id: {{ .Values.sm_aws_access_key_id }} + aws_secret_access_key: {{ .Values.sm_aws_secret_access_key }} +type: Opaque + + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "cpd-wsl-post-verify-job-part-b" + namespace: "{{ .Values.cpd_operators_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "098" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.0.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CPD_INSTANCE_NAMESPACE + value: {{ .Values.cpd_instance_namespace }} + - name: ACCOUNT_ID + value: {{ .Values.account_id }} + - name: REGION_ID + value: {{ .Values.region_id }} + - name: CLUSTER_ID + value: {{ .Values.cluster_id }} + - name: INSTANCE_ID + value: {{ .Values.instance_id }} + # Hard-coded for now: + - name: 
AVP_TYPE + value: "aws" + volumeMounts: + - name: aws + mountPath: /etc/mas/creds/aws + command: + - /bin/sh + - -c + - | + + set -e + echo + echo "================================================================================" + echo "Part B - Waiting for WSL and its dependencies to be complete" + echo "Wait for WS Status to be Completed" + echo "================================================================================" + echo + + echo "Waiting for wsStatus to be 'Completed'..." + + wait_period=0 + while true; do + wait_period=$(($wait_period+300)) + if [ $wait_period -gt 9000 ]; then + echo "wsStatus is not complete after 2 hours and 30 minutes of waiting." + echo "Watson Studio install failed (wsStatus). Exiting..." + exit 1 + else + sleep 300 + fi + + WSSTATUS_LOOKUP=$(oc get WS ws-cr -n ${CPD_INSTANCE_NAMESPACE} -o jsonpath='{.status.wsStatus}') + if [[ ! -z ${WSSTATUS_LOOKUP} && ${WSSTATUS_LOOKUP} == *"Completed"* ]]; then + echo "Watson Studio is complete" + break + fi + done + + + echo + echo "================================================================================" + echo "Lookup Cloud Pak for Data admin username" + USERNAME=$(oc get secret ibm-iam-bindinfo-platform-auth-idp-credentials -n ${CPD_INSTANCE_NAMESPACE} --ignore-not-found -o json | jq -r '.data.admin_username' | base64 -d) + + if [[ -z ${USERNAME} ]]; then + export CPD_ADMIN_USERNAME="admin" + export SECRET_NAME="admin-user-details" + export ADMIN_PASSWORD_PROPERTY="initial_admin_password" + + else + export CPD_ADMIN_USERNAME="cpadmin" + export SECRET_NAME="ibm-iam-bindinfo-platform-auth-idp-credentials" + export ADMIN_PASSWORD_PROPERTY="admin_password" + fi + + + echo + echo "Lookup Cloud Pak for Data admin password" + CPD_ADMIN_PASSWORD=$(oc get secret $SECRET_NAME -n ${CPD_INSTANCE_NAMESPACE} -o json | jq -r ".data.${ADMIN_PASSWORD_PROPERTY}" | base64 -d) + + echo + echo "Lookup Cloud Pak for Data admin url" + URL=$(oc get ZenService lite-cr -o jsonpath="{.status.url}{'\n'}" 
-n ${CPD_INSTANCE_NAMESPACE}) + + if [[ -z ${URL} || -z ${CPD_ADMIN_USERNAME} || -z ${CPD_ADMIN_PASSWORD} ]]; then + echo "CPD admin username/password/url details are missing, please check your Cloud Pak For Data instance health!" + exit 1 + fi + + CPD_ADMIN_URL="https://${URL}" + + echo + echo "================================================================================" + echo "Debug Cloud Pak for Data Admin details" + echo "================================================================================" + echo "- CP4D Dashboard ......................... ${CPD_ADMIN_URL}" + echo "- CP4D Admin Username .................... ${CPD_ADMIN_USERNAME}" + echo "- CP4D Admin Password .................... ${CPD_ADMIN_PASSWORD:0:2}" + + CPD_WSL_PROJECT_NAME=wsl-mas-${INSTANCE_ID}-predict + echo + echo "================================================================================" + echo "Generate MAS Config for WSL in $MAS_CONFIG_DIR folder" + echo "================================================================================" + echo "- CP4D WSL Project Name .................. 
${CPD_WSL_PROJECT_NAME}" + echo + + echo "Authorize CP4D Admin and get bearer token" + echo "================================================================================" + CPD_BEARER_TOKEN=$(curl -k -X POST --location "${CPD_ADMIN_URL}/icp4d-api/v1/authorize" --header "Cache-Control:no-cache" --header "Content-Type:application/json" --data "{\"username\":\"$CPD_ADMIN_USERNAME\",\"password\":\"$CPD_ADMIN_PASSWORD\"}" | jq -r '.token') + + echo "Check if a project with the name ${CPD_WSL_PROJECT_NAME} exists" + echo "================================================================================" + curl -k -X GET --location "${CPD_ADMIN_URL}/v2/projects" --header "Authorization:Bearer ${CPD_BEARER_TOKEN}" --header "Cache-Control:no-cache" --header "Content-Type:application/json" -o projectlist.json + WSL_PROJECT_ID=$(cat projectlist.json | jq -r --arg pn "$CPD_WSL_PROJECT_NAME" '.resources[] | select(.entity.name==$pn) | .metadata.guid') + + + if [[ -z ${WSL_PROJECT_ID} ]]; then + CPD_WSL_PROJECT_DESCRIPTION="Watson Studio Project for Maximo Application Suite" + CPD_WSL_PROJECT_STORAGE_GUID=$(uuidgen -r) + echo "Project ${CPD_WSL_PROJECT_NAME} does not exist in the project list. A new project will be created." + echo "================================================================================" + echo "- Project Name ......................... ${CPD_WSL_PROJECT_NAME}" + echo "- Project Description .................. ${CPD_WSL_PROJECT_DESCRIPTION}" + echo "- Project Storage Guid ................. 
${CPD_WSL_PROJECT_STORAGE_GUID}" + + echo "Check that the cpd admin user has permission to create projects" + echo "================================================================================" + curl -k -X PUT --location "${CPD_ADMIN_URL}/usermgmt/v1/role/zen_administrator_role" --header "Authorization:Bearer ${CPD_BEARER_TOKEN}" --header "Cache-Control:no-cache" --header "Content-Type:application/json" --data "{\"role_name\":\"Administrator\",\"description\":\"Administrator role\",\"permissions\":[\"administrator\",\"can_provision\",\"manage_catalog\",\"create_space\",\"create_project\"]}" + + echo "Creating a new project..." + WSL_PROJECT_ID=$(curl -k -X POST --location "${CPD_ADMIN_URL}/transactional/v2/projects" --header "Authorization:Bearer ${CPD_BEARER_TOKEN}" --header "Cache-Control:no-cache" --header "Content-Type:application/json" --data "{\"name\":\"$CPD_WSL_PROJECT_NAME\",\"description\":\"$CPD_WSL_PROJECT_DESCRIPTION\",\"generator\":\"cp4d-$CPD_ADMIN_USERNAME\",\"public\":false,\"storage\":{ \"type\":\"assetfiles\",\"guid\":\"$CPD_WSL_PROJECT_STORAGE_GUID\"}}" | jq -r '.location | split ("/") | last') + else + echo "Project named ${CPD_WSL_PROJECT_NAME} already exists and has been found" + fi + + echo "- WSL Project Id ............................. 
${WSL_PROJECT_ID}" + echo "Writing WSL Project ID into Secret Manager for use by Predict" + + source /mascli/functions/gitops_utils + # NOTE: cannot just render AWS secrets into here, as it will be exposed in the ArgoCD UI + # Instead, we pass them into a secret (ArgoCD knows to hide any data fields in k8s secrets), + # mount the secret on the jobs filesystem, and read them in here + SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/aws/aws_access_key_id) + SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/aws/aws_secret_access_key) + export SM_AWS_REGION=${REGION_ID} + sm_login + + SECRET_NAME_WSL_PROJECT_ID=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/$CPD_WSL_PROJECT_NAME + sm_update_secret $SECRET_NAME_WSL_PROJECT_ID "{\"wsl_project_id\": \"$WSL_PROJECT_ID\" }" + + echo "Done" + + restartPolicy: Never + serviceAccountName: wsl-precheck-sa + volumes: + - name: aws + secret: + secretName: aws + defaultMode: 420 + optional: false + backoffLimit: 4 \ No newline at end of file diff --git a/instance-applications/090-ibm-wsl/values.yaml b/instance-applications/090-ibm-wsl/values.yaml new file mode 100644 index 000000000..5299606a8 --- /dev/null +++ b/instance-applications/090-ibm-wsl/values.yaml @@ -0,0 +1,14 @@ +--- +cpd_service_storage_class: "" +cpd_service_block_storage_class: "" +cpd_service_scale_config: "small" +wsl_version: "" +wsl_channel: "" +ccs_channel: "" +ccs_version: "" +datarefinery_version: "" +datarefinery_channel: "" +ws_runtimes_channel: "" +ws_runtimes_version: "" +opencontent_rabbitmq_channel: "" +opencontent_elasticsearch_channel: "" diff --git a/instance-applications/100-ibm-sls/Chart.yaml b/instance-applications/100-ibm-sls/Chart.yaml new file mode 100644 index 000000000..eae462620 --- /dev/null +++ b/instance-applications/100-ibm-sls/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-sls +description: IBM Suite License Service +type: application +version: 1.0.0 diff --git a/instance-applications/100-ibm-sls/README.md 
b/instance-applications/100-ibm-sls/README.md new file mode 100644 index 000000000..d6d69419d --- /dev/null +++ b/instance-applications/100-ibm-sls/README.md @@ -0,0 +1,5 @@ +IBM Suite License Service +=============================================================================== +Installs the `ibm-sls` operator and creates an instance of the `LicenseService`. + +Contains a job that runs last (`07-postsync-update-sm_Job.yaml`). This registers the `${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/sls` secret in the **Secrets Vault** used to share some information that is generated at runtime with other ArgoCD Applications. \ No newline at end of file diff --git a/instance-applications/100-ibm-sls/templates/01-ibm-sls_OperatorGroup.yaml b/instance-applications/100-ibm-sls/templates/01-ibm-sls_OperatorGroup.yaml new file mode 100644 index 000000000..5f42ff837 --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/01-ibm-sls_OperatorGroup.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: operators.coreos.com/v1alpha2 +kind: OperatorGroup +metadata: + name: operatorgroup + namespace: mas-{{ .Values.instance_id }}-sls + annotations: + argocd.argoproj.io/sync-wave: "101" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - mas-{{ .Values.instance_id }}-sls diff --git a/instance-applications/100-ibm-sls/templates/01-ibm-sls_namespace.yaml b/instance-applications/100-ibm-sls/templates/01-ibm-sls_namespace.yaml new file mode 100644 index 000000000..3acb34e06 --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/01-ibm-sls_namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: mas-{{ .Values.instance_id }}-sls + annotations: + argocd.argoproj.io/sync-wave: "100" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels 
| toYaml | indent 4 }} +{{- end }} diff --git a/instance-applications/100-ibm-sls/templates/02-ibm-sls_Subscription.yaml b/instance-applications/100-ibm-sls/templates/02-ibm-sls_Subscription.yaml new file mode 100644 index 000000000..e67d47cd5 --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/02-ibm-sls_Subscription.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-sls + namespace: mas-{{ .Values.instance_id }}-sls + annotations: + argocd.argoproj.io/sync-wave: "102" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.sls_channel }}" + installPlanApproval: Automatic + name: ibm-sls + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace diff --git a/instance-applications/100-ibm-sls/templates/03-ibm-entitlement_Secret.yaml b/instance-applications/100-ibm-sls/templates/03-ibm-entitlement_Secret.yaml new file mode 100644 index 000000000..0a7b89e72 --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/03-ibm-entitlement_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-entitlement + namespace: mas-{{ .Values.instance_id }}-sls + annotations: + argocd.argoproj.io/sync-wave: "102" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: >- + {{ .Values.ibm_entitlement_key }} diff --git a/instance-applications/100-ibm-sls/templates/04-mongo-credentials_Secret.yaml b/instance-applications/100-ibm-sls/templates/04-mongo-credentials_Secret.yaml new file mode 100644 index 000000000..4cc8fb406 --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/04-mongo-credentials_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ 
.Values.sls_mongo_secret_name }}" + namespace: mas-{{ .Values.instance_id }}-sls + annotations: + argocd.argoproj.io/sync-wave: "103" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + username: "{{ .Values.sls_mongo_username }}" + password: "{{ .Values.sls_mongo_password }}" diff --git a/instance-applications/100-ibm-sls/templates/05-ibm-sls-sls-entitlement_Secret.yaml b/instance-applications/100-ibm-sls/templates/05-ibm-sls-sls-entitlement_Secret.yaml new file mode 100644 index 000000000..9fa059746 --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/05-ibm-sls-sls-entitlement_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-sls-sls-entitlement + namespace: mas-{{ .Values.instance_id }}-sls + annotations: + argocd.argoproj.io/sync-wave: "104" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + entitlement: >- + {{ .Values.sls_entitlement_file }} diff --git a/instance-applications/100-ibm-sls/templates/06-ibm-sls_LicenseService.yaml b/instance-applications/100-ibm-sls/templates/06-ibm-sls_LicenseService.yaml new file mode 100644 index 000000000..10e3775bb --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/06-ibm-sls_LicenseService.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: sls.ibm.com/v1 +kind: LicenseService +metadata: + name: sls + namespace: mas-{{ .Values.instance_id }}-sls + annotations: + argocd.argoproj.io/sync-wave: "105" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + mongo: +{{ .Values.mongo_spec | toYaml | indent 4 }} + license: + accept: true + settings: + auth: + enforce: true + registration: + open: true + {{- if .Values.icr_cp_open }} + registry: "{{ .Values.icr_cp_open }}" + {{ end }} diff --git 
a/instance-applications/100-ibm-sls/templates/07-postsync-update-sm_Job.yaml b/instance-applications/100-ibm-sls/templates/07-postsync-update-sm_Job.yaml new file mode 100644 index 000000000..16e8dd70e --- /dev/null +++ b/instance-applications/100-ibm-sls/templates/07-postsync-update-sm_Job.yaml @@ -0,0 +1,223 @@ +{{- if .Values.run_sync_hooks }} + +{{ $ns := printf "mas-%s-sls" .Values.instance_id}} +{{ $aws_secret := "aws"}} +{{ $np_name := "postsync-ibm-sls-update-sm-np" }} +{{ $role_name := "postsync-ibm-sls-update-sm-r" }} +{{ $sa_name := "postsync-ibm-sls-update-sm-sa" }} +{{ $rb_name := "postsync-ibm-sls-update-sm-rb" }} +{{ $job_label := "postsync-ibm-sls-update-sm-job" }} + + + +--- +# Permit outbound communication by the Job pods +# (Needed to communicate with the K8S HTTP API and AWS SM) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $np_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "110" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + app: {{ $job_label }} + egress: + - {} + policyTypes: + - Egress + + +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ $aws_secret }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "110" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + aws_access_key_id: {{ .Values.sm_aws_access_key_id | b64enc }} + aws_secret_access_key: {{ .Values.sm_aws_secret_access_key | b64enc }} +type: Opaque + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "110" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + namespace: {{ $ns }} + annotations: + 
argocd.argoproj.io/sync-wave: "110" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + apiGroups: + - "" + resources: + - configmaps + + + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "111" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $role_name }} + +--- +apiVersion: batch/v1 +kind: Job +metadata: + # Generate the job name by suffixing the label with a hash of all chart values + # This is to ensure that ArgoCD will delete and recreate the job if (and only if) anything changes + # Any change to instance config will trigger a rerun of the job. + # We can refine this in future to only take into account a subset of instance config (perhaps just values under ibm_sls?). + # But the job is idempotent and quick so no real harm in running it when we don't actually need to. 
+ name: "{{ $job_label }}-{{ .Values | toYaml | adler32sum }}" + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "112" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: ACCOUNT_ID + value: {{ .Values.account_id }} + - name: REGION_ID + value: {{ .Values.region_id }} + - name: CLUSTER_ID + value: {{ .Values.cluster_id }} + - name: INSTANCE_ID + value: {{ .Values.instance_id }} + # Hard-coded for now: + - name: AVP_TYPE + value: "aws" + volumeMounts: + - name: aws + mountPath: /etc/mas/creds/aws + - name: sls-suite-registration + mountPath: /etc/mas/creds/sls-suite-registration + command: + - /bin/sh + - -c + - | + + set -e + + # NOTE: cannot just render AWS secrets into here, as it will be exposed in the ArgoCD UI + # Instead, we pass them into a secret (ArgoCD knows to hide any data fields in k8s secrets), + # mount the secret on the jobs filesystem, and read them in here + SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/aws/aws_access_key_id) + SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/aws/aws_secret_access_key) + + echo "Fetching registrationKey from sls-suite-registration ConfigMap in mas-${INSTANCE_ID}-sls" + SLS_REGISTRATION_KEY=$(cat /etc/mas/creds/sls-suite-registration/registrationKey) + if [[ -z "${SLS_REGISTRATION_KEY}" ]]; then + echo "Failed to fetch registrationKey" + exit 1 + fi + + echo "Fetching ca from sls-suite-registration ConfigMap in mas-${INSTANCE_ID}-sls" + SLS_CA=$(cat /etc/mas/creds/sls-suite-registration/ca | base64 -w0) + if [[ -z "${SLS_CA}" ]]; then + echo "Failed to fetch ca" + exit 1 + fi + + 
# might as well take advantage of gitops_utils for sm_ functions as we're using the cli image + source /mascli/functions/gitops_utils + + # aws configure set aws_access_key_id $SM_AWS_ACCESS_KEY_ID + # aws configure set aws_secret_access_key $SM_AWS_SECRET_ACCESS_KEY + # aws configure set default.region $REGION_ID + # aws configure list + export SM_AWS_REGION=${REGION_ID} + sm_login + + # aws secretsmanager create-secret --name ${SECRET_NAME} --secret-string "${SECRET_VALUE}" + SECRET_NAME_SLS=${ACCOUNT_ID}/${CLUSTER_ID}/${INSTANCE_ID}/sls + sm_update_secret $SECRET_NAME_SLS "{\"registration_key\": \"$SLS_REGISTRATION_KEY\", \"ca_b64\": \"$SLS_CA\" }" + + + restartPolicy: Never + + # TODO: is this the correct SA to use here? + # No, probably want to add a more restricted SA that can just do things that these post-sync jobs need to do + serviceAccountName: {{ $sa_name }} + volumes: + - name: aws + secret: + secretName: {{ $aws_secret }} + defaultMode: 420 + optional: false + - name: sls-suite-registration + configMap: + name: sls-suite-registration + optional: false + + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/100-ibm-sls/values.yaml b/instance-applications/100-ibm-sls/values.yaml new file mode 100644 index 000000000..b6d010fe5 --- /dev/null +++ b/instance-applications/100-ibm-sls/values.yaml @@ -0,0 +1,8 @@ +--- +ibm_entitlement_key: xxx +sls_mongo_username: xxx +sls_mongo_password: xxx +sls_mongo_secret_name: xxx +mongo_spec: {} +sls_entitlement_file: xxx +sls_channel: 3.x diff --git a/instance-applications/120-ibm-db2u-database/Chart.yaml b/instance-applications/120-ibm-db2u-database/Chart.yaml new file mode 100644 index 000000000..0f224009e --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-db2u-database +description: IBM DB2U Database +type: application +version: 1.0.0 diff --git a/instance-applications/120-ibm-db2u-database/README.md 
b/instance-applications/120-ibm-db2u-database/README.md new file mode 100644 index 000000000..4b43f749d --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/README.md @@ -0,0 +1,7 @@ +IBM DB2U Database +=============================================================================== +Create a Db2u database for a MAS app. + +Contains a presync hook (`00-presync-await-crd_Job.yaml`) that ensures we wait for the db2uclusters CRD to be installed before attempting to sync. + +Contains a job that runs last (`05-postsync-setup-db2_Job.yaml`). This registers the `${ACCOUNT_ID}/${CLUSTER_ID}/${MAS_INSTANCE_ID}/db2/${DB2_INSTANCE_NAME}/config` secret in the **Secrets Vault** used to share some information that is generated at runtime with other ArgoCD Applications. This job also performs some special configuration steps that are required if the Db2u database is intended for use by the Manage MAS Application. \ No newline at end of file diff --git a/instance-applications/120-ibm-db2u-database/templates/00-presync-await-crd_Job.yaml b/instance-applications/120-ibm-db2u-database/templates/00-presync-await-crd_Job.yaml new file mode 100644 index 000000000..6dfcdbfdc --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/00-presync-await-crd_Job.yaml @@ -0,0 +1,107 @@ +--- +# Service account that is authorized to read k8s secrets (needed by the job) +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "presync-sa-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "db2-database-presync-sa-rb-{{ .Values.db2_instance_name }}" + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: 
HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: "presync-sa-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "presync-await-crd-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: [] + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + + + # wait till CRD db2uclusters.db2u.databases.ibm.com NamesAccepted=True STARTS + wait_period=0 + while true + do + wait_period=$(($wait_period+10)) + if [ $wait_period -gt 300 ];then + echo "CRD db2uclusters.db2u.databases.ibm.com is not ready with in 300 sec, exiting" + exit 1 + else + sleep 10 + fi + export DB2_CRD_NAMES_ACCEPTED_STATUS=`oc get crd db2uclusters.db2u.databases.ibm.com -o=jsonpath="{.status.conditions[?(@.type=='NamesAccepted')].status}"` + echo "DB2_CRD_NAMES_ACCEPTED_STATUS .... ${DB2_CRD_NAMES_ACCEPTED_STATUS}" + + if [[ "$DB2_CRD_NAMES_ACCEPTED_STATUS" == "True" ]]; then + break + fi + done + # wait till CRD db2uclusters.db2u.databases.ibm.com NamesAccepted=True DONE + + restartPolicy: Never + + # TODO: is this the correct SA to use here? 
+ # No, probably want to add a more restricted SA that can just do things that these post-sync jobs need to do + serviceAccountName: "presync-sa-{{ .Values.db2_instance_name }}" + volumes: [] + backoffLimit: 4 diff --git a/instance-applications/120-ibm-db2u-database/templates/01-db2_Issuer.yaml b/instance-applications/120-ibm-db2u-database/templates/01-db2_Issuer.yaml new file mode 100644 index 000000000..b2cdba818 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/01-db2_Issuer.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: "db2u-issuer-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "120" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + ca: + secretName: "db2u-ca" diff --git a/instance-applications/120-ibm-db2u-database/templates/02-db2_Certificate.yaml b/instance-applications/120-ibm-db2u-database/templates/02-db2_Certificate.yaml new file mode 100644 index 000000000..8749b531d --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/02-db2_Certificate.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "db2u-certificate-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "121" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + secretName: "db2u-certificate-{{ .Values.db2_instance_name }}" + duration: "175200h0m0s" + renewBefore: "2160h0m0s" + issuerRef: + name: "db2u-issuer-{{ .Values.db2_instance_name }}" + kind: Issuer + usages: + - cert sign + - digital signature + - key encipherment + - server auth + commonName: "db2u" + dnsNames: + - "{{ .Values.db2_instance_name | lower }}-{{ .Values.db2_namespace }}.{{ .Values.cluster_domain }}" + - "*.{{ 
.Values.db2_instance_name | lower }}-{{ .Values.db2_namespace }}.{{ .Values.cluster_domain }}" + - "c-{{ .Values.db2_instance_name | lower }}-db2u-engn-svc.{{ .Values.db2_namespace }}.svc" + - "*.c-{{ .Values.db2_instance_name | lower }}-db2u-engn-svc.{{ .Values.db2_namespace }}.svc" + subject: + countries: + - GB + streetAddresses: + - London + localities: + - London + organizationalUnits: + - IBM Maximo Application Suite DB2U diff --git a/instance-applications/120-ibm-db2u-database/templates/03-db2ucluster.yaml b/instance-applications/120-ibm-db2u-database/templates/03-db2ucluster.yaml new file mode 100644 index 000000000..aab2a2622 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/03-db2ucluster.yaml @@ -0,0 +1,131 @@ +--- +apiVersion: db2u.databases.ibm.com/v1 +kind: Db2uCluster +metadata: + name: "{{ .Values.db2_instance_name | lower }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "122" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + account: + privileged: true + imagePullSecrets: + - ibm-registry + addOns: + graph: {} + rest: {} +{{- if .Values.db2_addons_audit_config }} + audit: +{{ .Values.db2_addons_audit_config | toYaml | indent 6 }} +{{- end }} + version: "{{ .Values.db2_version }}" + size: {{ .Values.db2_num_pods }} + environment: + dbType: db2wh + database: + name: "{{ .Values.db2_dbname }}" + +{{- if .Values.db2_database_db_config }} + dbConfig: +{{ .Values.db2_database_db_config | toYaml | indent 8 }} +{{- end }} + + settings: + dftTableOrg: "{{ .Values.db2_table_org }}" + ssl: + secretName: "db2u-certificate-{{ .Values.db2_instance_name }}" + certLabel: "CN=db2u" + instance: + +{{- if .Values.db2_instance_registry }} + registry: +{{ .Values.db2_instance_registry | toYaml | indent 8 }} +{{- end }} + +{{- if .Values.db2_instance_dbm_config }} + 
dbmConfig: +{{ .Values.db2_instance_dbm_config | toYaml | indent 8 }} +{{- end }} + mln: + total: {{ .Values.db2_mln_count }} + license: + accept: true + podConfig: + db2u: + resource: + db2u: + requests: + cpu: "{{ .Values.db2_cpu_requests }}" + memory: "{{ .Values.db2_memory_requests }}" + limits: + cpu: "{{ .Values.db2_cpu_limits }}" + memory: "{{ .Values.db2_memory_limits }}" + storage: + - name: meta + type: create + spec: + storageClassName: "{{ .Values.db2_meta_storage_class }}" + accessModes: + - "{{ .Values.db2_meta_storage_accessmode }}" + resources: + requests: + storage: "{{ .Values.db2_meta_storage_size }}" + - name: data + type: template + spec: + storageClassName: "{{ .Values.db2_data_storage_class }}" + accessModes: + - "{{ .Values.db2_data_storage_accessmode }}" + resources: + requests: + storage: "{{ .Values.db2_data_storage_size }}" +{{- if .Values.db2_backup_storage_class }} + - name: backup + type: create + spec: + accessModes: + - "{{ .Values.db2_backup_storage_accessmode }}" + resources: + requests: + storage: "{{ .Values.db2_backup_storage_size }}" + storageClassName: "{{ .Values.db2_backup_storage_class }}" + type: create +{{- end }} +{{- if .Values.db2_logs_storage_class }} + - name: activelogs + spec: + accessModes: + - "{{ .Values.db2_logs_storage_accessmode }}" + resources: + requests: + storage: "{{ .Values.db2_logs_storage_size }}" + storageClassName: "{{ .Values.db2_logs_storage_class }}" + type: template +{{- end }} +{{- if .Values.db2_temp_storage_class }} + - name: tempts + spec: + accessModes: + - "{{ .Values.db2_temp_storage_accessmode }}" + resources: + requests: + storage: "{{ .Values.db2_temp_storage_size }}" + storageClassName: "{{ .Values.db2_temp_storage_class }}" + type: template +{{- end }} +{{- if .Values.db2_audit_logs_storage_class }} + - name: auditlogs + spec: + accessModes: + - "{{ .Values.db2_audit_logs_storage_accessmode }}" + resources: + requests: + storage: "{{ .Values.db2_audit_logs_storage_size }}" + 
storageClassName: "{{ .Values.db2_audit_logs_storage_class }}" + type: template +{{- end }} diff --git a/instance-applications/120-ibm-db2u-database/templates/04-tlsroute.yaml b/instance-applications/120-ibm-db2u-database/templates/04-tlsroute.yaml new file mode 100644 index 000000000..40928ba49 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/04-tlsroute.yaml @@ -0,0 +1,26 @@ +--- +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: db2u-{{ .Values.db2_instance_name }}-tls-route + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "123" + labels: + formation_id: "{{ .Values.db2_instance_name | lower }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + host: >- + {{ .Values.db2_instance_name | lower }}-{{ .Values.db2_namespace }}.{{ .Values.cluster_domain }} + to: + kind: Service + name: "c-{{ .Values.db2_instance_name | lower }}-db2u-engn-svc" + weight: 100 + port: + targetPort: ssl-server + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + wildcardPolicy: None diff --git a/instance-applications/120-ibm-db2u-database/templates/05-postsync-setup-db2_Job.yaml b/instance-applications/120-ibm-db2u-database/templates/05-postsync-setup-db2_Job.yaml new file mode 100644 index 000000000..0e9701a2f --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/05-postsync-setup-db2_Job.yaml @@ -0,0 +1,512 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: "aws-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "127" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + aws_access_key_id: {{ .Values.sm_aws_access_key_id | b64enc }} + aws_secret_access_key: {{ .Values.sm_aws_secret_access_key | b64enc }} +type: Opaque + +--- +# Service account that is authorized to read k8s 
secrets (needed by the job) +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "postsync-sa-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "127" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "db2-database-postsync-sa-rb-{{ .Values.db2_instance_name }}" + annotations: + argocd.argoproj.io/sync-wave: "128" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: "postsync-sa-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + # Suffix the Job name with a hash of all chart values + # This is to ensure that ArgoCD will delete and recreate the job if (and only if) anything changes in the DB2 config + name: "postsync-setup-db2-{{ .Values.db2_instance_name }}-{{ .Values | toYaml | adler32sum }}" + namespace: "{{ .Values.db2_namespace }}" + annotations: + argocd.argoproj.io/sync-wave: "129" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: ACCOUNT_ID + value: {{ .Values.account_id }} + - name: REGION_ID + value: {{ .Values.region_id }} + - name: CLUSTER_ID + value: {{ .Values.cluster_id }} + - name: MAS_INSTANCE_ID + value: {{ .Values.instance_id }} + - name: MAS_APP_ID + value: {{ 
.Values.mas_application_id}} + # Hard-coded for now: + - name: AVP_TYPE + value: "aws" + - name: DB2_NAMESPACE + value: "{{ .Values.db2_namespace }}" + - name: DB2_INSTANCE_NAME + value: "{{ .Values.db2_instance_name }}" + - name: DB2_DBNAME + value: "{{ .Values.db2_dbname }}" + - name: DB2_TLS_VERSION + value: "{{ .Values.db2_tls_version }}" + + volumeMounts: + - name: aws + mountPath: /etc/mas/creds/aws + - name: db2u-certificate + mountPath: /etc/mas/creds/db2u-certificate + command: + - /bin/sh + - -c + - | + + set -e + + source /mascli/functions/gitops_utils + + function wait_for_resource { + RES_TYPE="$1" + RES_NAME="$2" + RES_NS="$3" + RETRIES=${4:-10} + RETRY_DELAY_SECONDS=${5:-30} + + for (( c=1; c<="${RETRIES}"; c++ )); do + + echo "... attempt ${c} of ${RETRIES}" + + rc=0 + oc get "${RES_TYPE}/${RES_NAME}" -n "${RES_NS}" || rc=$? + if [[ "$rc" == "0" ]]; then + echo "...... success" + return 0 + fi + + if [[ "${c}" -lt "${RETRIES}" ]]; then + echo "...... failed (rc: ${rc}), retry in ${RETRY_DELAY_SECONDS}s" + sleep $RETRY_DELAY_SECONDS + fi + done + + echo "...... failed, no attempts remain" + return 1 + } + + # DB2 operator does not automatically apply dbConfig parameters set on the Db2uCluster CR + # Instead, a script /db2u/scripts/apply-db2cfg-settings.sh must be executed on one of the db2u pods + # However, this does not always seem to work and no indication is given in the output of the script whether it worked or not. + # One approach is to check the current configuration parameters (db2 get db cfg for ${DB2_DBNAME}) one by one and verify that their value aligns with that set in the CR. + # However, this is not straightforward since DB2 implicitly reformats certain param values (e.g. APPLHEAPSZ: '8192 AUTOMATIC' is reformatted to AUTOMATIC(8192)).
+ # Until we can come up with a better way of doing this (or the Db2u operator is fixed), we will take the approach used in ansible-devops, + # whereby the value of single parameter (CHNGPGS_THRESH) is checked against a known value (40) to see if the script executed successfully (and retry if not) + # See https://github.com/ibm-mas/ansible-devops/blob/b9f3ef5b7999640b0a31d0aba518ba85ef8b704f/ibm/mas_devops/roles/suite_db2_setup_for_manage/tasks/apply-db2-config-settings.yml#L39 + function db2apply { + RETRIES=${1:-5} + RETRY_DELAY_SECONDS=${2:-30} + + for (( c=1; c<="${RETRIES}"; c++ )); do + echo "" + echo "... attempt ${c} of ${RETRIES}" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc '/db2u/scripts/apply-db2cfg-settings.sh --setting all | tee /tmp/apply-db2cfg-settings.log' db2inst1 + # no useful info in return code of this script + + rc=0 + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc 'db2 get db cfg for '${DB2_DBNAME}' | grep "(CHNGPGS_THRESH) = 40"' db2inst1 || rc=$? + if [[ "$rc" == "0" ]]; then + echo "...... success" + return 0 + fi + + if [[ "${c}" -lt "${RETRIES}" ]]; then + echo "...... failed (rc: ${rc}), retry in ${RETRY_DELAY_SECONDS}s" + sleep $RETRY_DELAY_SECONDS + fi + done + + echo "...... failed, no attempts remain" + return 1 + } + + export DB2_CONFIG_SECRET=${ACCOUNT_ID}/${CLUSTER_ID}/${MAS_INSTANCE_ID}/jdbc/${DB2_INSTANCE_NAME}/config + + echo "" + echo "================================================================================" + echo "Settings" + echo "================================================================================" + echo "ACCOUNT_ID .......................... ${ACCOUNT_ID}" + echo "REGION_ID ........................... ${REGION_ID}" + echo "CLUSTER_ID .......................... ${CLUSTER_ID}" + echo "MAS_INSTANCE_ID ..................... ${MAS_INSTANCE_ID}" + echo "MAS_APP_ID .......................... ${MAS_APP_ID}" + echo "AVP_TYPE ............................ 
${AVP_TYPE}" + echo "DB2_NAMESPACE ....................... ${DB2_NAMESPACE}" + echo "DB2_INSTANCE_NAME ................... ${DB2_INSTANCE_NAME}" + echo "DB2_DBNAME .......................... ${DB2_DBNAME}" + + export SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/aws/aws_access_key_id) + export SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/aws/aws_secret_access_key) + export SM_AWS_REGION=${REGION_ID} + + echo "" + echo "================================================================================" + echo "Waiting for pod c-${DB2_INSTANCE_NAME}-db2u-0 to be present before continuing (timeout 300s)" + echo "================================================================================" + wait_for_resource "pod" "c-${DB2_INSTANCE_NAME}-db2u-0" "${DB2_NAMESPACE}" + + echo "" + echo "================================================================================" + echo "Waiting for pod c-${DB2_INSTANCE_NAME}-db2u-0 to report Ready=True before continuing (timeout 300s)" + echo "================================================================================" + oc wait --for=condition=Ready pod/c-${DB2_INSTANCE_NAME}-db2u-0 --timeout 300s -n ${DB2_NAMESPACE} + + echo "" + echo "================================================================================" + echo "Waiting for service c-${DB2_INSTANCE_NAME}-db2u-engn-svc to be present before continuing (timeout 300s)" + echo "================================================================================" + wait_for_resource "svc" "c-${DB2_INSTANCE_NAME}-db2u-engn-svc" "${DB2_NAMESPACE}" + + if [[ "$MAS_APP_ID" == "manage" ]]; then + echo "" + echo "================================================================================" + echo "Invoke Suite DB2 setup for Manage" + echo "================================================================================" + # Copy and run a script on the db2u pod to perform Manage-specific setup tasks + + # Path to the generated script, on both this pod and on the db2u pod + 
SETUPDB_SH_PATH="/tmp/setupdb.sh" + + echo "" + echo "Create ${SETUPDB_SH_PATH}" + echo "--------------------------------------------------------------------------------" + + # Generate a script to copy and run on the db2u pod + cat > ${SETUPDB_SH_PATH} << EOF + #!/bin/bash + + TBSP_SQL="/tmp/.tbsp.sql" + + if [ -f \$TBSP_SQL ]; then + rm \$TBSP_SQL + fi + + if ! db2gcf -s | grep Available >/dev/null; then + + db2_kill + ipclean -a + db2start + + if ! db2gcf -s | grep Available >/dev/null; then + echo "Instance is not up. Please check." + exit 1 + fi + + fi + + db2 connect to ${DB2_DBNAME} + if [ \$? != 0 ]; then + echo "Failed to connect to database!" + exit 1 + fi + + db2 -x "select case when NOT exists( + select 1 + from syscat.bufferpools + where BPNAME='MAXBUFPOOL') + then 'CREATE BUFFERPOOL MAXBUFPOOL SIZE 8192 AUTOMATIC PAGESIZE 32 K;' + else '--' + end + from sysibm.sysdummy1" >\$TBSP_SQL + maxd_bpf=\$? + db2 -x "select case when NOT exists( + select 1 + from syscat.bufferpools + where BPNAME='MAXBUFPOOLINDX') + then 'CREATE BUFFERPOOL MAXBUFPOOLINDX SIZE 8192 AUTOMATIC PAGESIZE 32 K;' + else '--' + end + from sysibm.sysdummy1" >>\$TBSP_SQL + maxi_bpf=\$? + db2 -x "select case when NOT exists( + select 1 + from syscat.bufferpools + where BPNAME='MAXTEMPBP') + then 'CREATE BUFFERPOOL MAXTEMPBP SIZE 8192 AUTOMATIC PAGESIZE 32 K;' + else '--' + end + from sysibm.sysdummy1" >>\$TBSP_SQL + maxtmp_bpf=\$? + db2 -x "select case when NOT exists( + select 1 + from syscat.tablespaces + where TBSPACE='MAXDATA') + then 'CREATE TABLESPACE MAXDATA PAGESIZE 32 K MANAGED BY AUTOMATIC STORAGE BUFFERPOOL MAXBUFPOOL;' + else '--' + end + from sysibm.sysdummy1" >>\$TBSP_SQL + maxd_tbsp=\$? + db2 -x "select case when NOT exists( + select 1 + from syscat.tablespaces + where TBSPACE='MAXINDEX') + then 'CREATE TABLESPACE MAXINDEX PAGESIZE 32 K MANAGED BY AUTOMATIC STORAGE BUFFERPOOL MAXBUFPOOLINDX;' + else '--' + end + from sysibm.sysdummy1" >>\$TBSP_SQL + maxi_tbsp=\$? 
+ db2 -x "select case when NOT exists( + select 1 + from syscat.tablespaces + where TBSPACE='MAXTEMP') + then 'CREATE TEMPORARY TABLESPACE MAXTEMP PAGESIZE 32 K MANAGED BY AUTOMATIC STORAGE BUFFERPOOL MAXTEMPBP;' + else '--' + end + from sysibm.sysdummy1" >>\$TBSP_SQL + maxtmp_tbsp=\$? + + if [[ "\$maxd_bpf" -eq "0" && "\$maxi_bpf" -eq "0" && "\$maxtmp_bpf" -eq "0" && "\$maxd_tbsp" -eq "0" && "\$maxi_tbsp" -eq "0" && "\$maxtmp_tbsp" -eq "0" && -f "\$TBSP_SQL" ]]; then + db2 -tvf \$TBSP_SQL + rm \$TBSP_SQL + else + echo "Error detected." + exit 1 + fi + EOF + # IMPORTANT: Do not make any changes to the "EOF" line above (including its indentation) + + chmod +x ${SETUPDB_SH_PATH} + + echo "" + echo "Copy ${SETUPDB_SH_PATH} to ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc cp ${SETUPDB_SH_PATH} ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0:${SETUPDB_SH_PATH} -c db2u || exit $? + + echo "" + echo "Executing ${SETUPDB_SH_PATH} file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${SETUPDB_SH_PATH} | tee /tmp/setupdb.log" db2inst1 || exit $? + + echo "" + echo "Creating /mnt/backup/MIRRORLOGPATH in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/backup/MIRRORLOGPATH" db2inst1 || exit $? + + echo "" + echo "Creating /mnt/bludata0/db2/archive_log in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/bludata0/db2/archive_log/" db2inst1 || exit $? 
+ + echo "" + echo "Creating /mnt/backup/staging in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/backup/staging" db2inst1 || exit $? + + echo "" + echo "================================================================================" + echo "Calling apply-db2cfg-settings.sh file on c-${DB2_INSTANCE_NAME}-db2u-0" + echo "================================================================================" + db2apply || exit $? + + fi # [[ "$MAS_APP_ID" == "manage" ]] + + + echo "" + echo "================================================================================" + echo "Invoke Suite DB2 Backup" + echo "================================================================================" + # Some parameters like LOGARCHMETH1 being set can cause a backup to be needed as it moves + # to archival logging. + # Copy and run a script on the db2u pod to perform that backup now if we are in that state. + + # Path to the generated script, on both this pod and on the db2u pod + BACKUPDB_SH_PATH="/tmp/backupdb.sh" + + echo "" + echo "Create ${BACKUPDB_SH_PATH}" + echo "--------------------------------------------------------------------------------" + + # Generate a script to copy and run on the db2u pod + cat > ${BACKUPDB_SH_PATH} << EOF + #!/bin/bash + + # Check that connect returns SQL1116N which means BACKUP PENDING state + if db2 connect to ${DB2_DBNAME} | grep SQL1116N >/dev/null; then + echo "backupdb.sh: Database connect returning SQL1116N, do backup now" + else + echo "backupdb.sh: Database connect not returning SQL1116N, nothing to do, exit now" + exit 0 + fi + + echo "backupdb.sh: Creating backup folder /mnt/backup" + mkdir -p /mnt/backup + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: db2 force applications" + db2 force application all + rc=\$? 
+ [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: Turn off comms manager" + db2set -null DB2COMM + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: Deactivate database" + db2 deactivate database ${DB2_DBNAME} + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: db2stop" + db2stop force + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: db2start in admin mode" + db2start admin mode restricted access + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + # dbstart does not always start straight away, wait 20 seconds + sleep 20 + + echo "backupdb.sh: db2 backup db ${DB2_DBNAME} on all dbpartitionnums" + db2 backup db ${DB2_DBNAME} on all dbpartitionnums to /mnt/backup + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: db2stop" + db2stop force + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: db2set comms manager" + db2set DB2COMM=TCPIP,SSL + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + + echo "backupdb.sh: db2start" + db2start + rc=\$? + [ \$rc -ne 0 ] && exit \$rc + EOF + # IMPORTANT: Do not make any changes to the "EOF" line above (including its indentation) + + chmod +x ${BACKUPDB_SH_PATH} || exit $? + + echo "" + echo "Copy ${BACKUPDB_SH_PATH} to ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc cp ${BACKUPDB_SH_PATH} ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0:${BACKUPDB_SH_PATH} -c db2u || exit $? + + echo "" + echo "Executing ${BACKUPDB_SH_PATH} file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${BACKUPDB_SH_PATH} | tee /tmp/backupdb.log" db2inst1 || exit $? 
+ + echo "" + echo "================================================================================" + echo "Setting ${DB2_CONFIG_SECRET} secret" + echo "================================================================================" + + + echo "" + echo "Fetch CA cert from db2u-certificate-${DB2_INSTANCE_NAME} secret in ${DB2_NAMESPACE}" + echo "--------------------------------------------------------------------------------" + export DB2_CA_PEM=$(cat /etc/mas/creds/db2u-certificate/ca.crt | base64 -w0) + if [[ -z "${DB2_CA_PEM}" ]]; then + echo "Failed to fetch db2 ca pem" + exit 1 + fi + + echo "" + echo "Fetch ssl-server target port of c-${DB2_INSTANCE_NAME}-db2u-engn-svc service in ${DB2_NAMESPACE}" + echo "--------------------------------------------------------------------------------" + export DB2_TLS_SERVICEPORT=$(oc get svc c-${DB2_INSTANCE_NAME}-db2u-engn-svc -n ${DB2_NAMESPACE} -o=jsonpath="{.spec.ports[?(@.name=='ssl-server')].targetPort}") + if [[ -z "${DB2_TLS_SERVICEPORT}" ]]; then + echo "Failed to fetch db2 tls service port" + exit 1 + fi + + echo "" + echo "Constructing JDBC_CONNECTION_URL" + echo "--------------------------------------------------------------------------------" + export JDBC_CONNECTION_URL="jdbc:db2://c-${DB2_INSTANCE_NAME}-db2u-engn-svc.${DB2_NAMESPACE}.svc:${DB2_TLS_SERVICEPORT}/${DB2_DBNAME}:sslConnection=true;sslVersion=${DB2_TLS_VERSION};" + + echo "" + echo "Updating Secrets Manager" + echo "--------------------------------------------------------------------------------" + sm_login + sm_update_secret ${DB2_CONFIG_SECRET} "{ \"jdbc_connection_url\": \"${JDBC_CONNECTION_URL}\", \"jdbc_instance_name\": \"${DB2_INSTANCE_NAME}\", \"ca_b64\": \"${DB2_CA_PEM}\", \"db2_dbname\": \"${DB2_DBNAME}\", \"db2_namespace\": \"${DB2_NAMESPACE}\" }" || exit $?
+ + + restartPolicy: Never + + serviceAccountName: "postsync-sa-{{ .Values.db2_instance_name }}" + volumes: + - name: aws + secret: + secretName: "aws-{{ .Values.db2_instance_name }}" + defaultMode: 420 + optional: false + - name: db2u-certificate + secret: + secretName: "db2u-certificate-{{ .Values.db2_instance_name }}" + defaultMode: 420 + optional: false + backoffLimit: 4 diff --git a/instance-applications/120-ibm-db2u-database/values.yaml b/instance-applications/120-ibm-db2u-database/values.yaml new file mode 100644 index 000000000..0defed672 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/values.yaml @@ -0,0 +1,2 @@ +--- +ibm_entitlement_key: xxx diff --git a/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/Chart.yaml b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/Chart.yaml new file mode 100644 index 000000000..f45ea5634 --- /dev/null +++ b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-db2u-jdbc-config-rotate-password +description: IBM DB2U Rotate password JDBC Config +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/README.md b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/README.md new file mode 100644 index 000000000..2e5eef3a2 --- /dev/null +++ b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/README.md @@ -0,0 +1,3 @@ +IBM DB2U JDBC rotate password Configuration +=============================================================================== +Rotate DB2 user password \ No newline at end of file diff --git a/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/templates/00-db2u_jdbccfg-credentials.yaml b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/templates/00-db2u_jdbccfg-credentials.yaml new file mode 100644 index 000000000..b81f4f203 --- /dev/null +++
b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/templates/00-db2u_jdbccfg-credentials.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: "jdbc-{{ .Values.db2_instance_name | lower }}-credentials" + namespace: "mas-{{ .Values.mas_instance_id }}-core" + annotations: + argocd.argoproj.io/sync-wave: "150" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +stringData: + username: "{{ .Values.db2_jdbc_username }}" + password: "{{ .Values.jdbc_instance_password }}" diff --git a/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/values.yaml b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/values.yaml new file mode 100644 index 000000000..0defed672 --- /dev/null +++ b/instance-applications/130-ibm-db2u-jdbc-config-rotate-password/values.yaml @@ -0,0 +1,2 @@ +--- +ibm_entitlement_key: xxx diff --git a/instance-applications/130-ibm-jdbc-config/Chart.yaml b/instance-applications/130-ibm-jdbc-config/Chart.yaml new file mode 100644 index 000000000..5fa41f31d --- /dev/null +++ b/instance-applications/130-ibm-jdbc-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-jdbc-config +description: IBM JDBC Config +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-jdbc-config/README.md b/instance-applications/130-ibm-jdbc-config/README.md new file mode 100644 index 000000000..5baa841e6 --- /dev/null +++ b/instance-applications/130-ibm-jdbc-config/README.md @@ -0,0 +1,8 @@ +IBM JDBC Configuration +=============================================================================== + +Create a JdbcCfg CR instance and associated credentials secret for use by MAS. 
+ +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). + +If using incluster-db2, a pre-sync hook (`00-presync-create-db2-user_Job.yaml`) will run that sets up an LDAP user in DB2 with the credentials provided in the JDBC config. \ No newline at end of file diff --git a/instance-applications/130-ibm-jdbc-config/templates/00-presync-create-db2-user_Job.yaml b/instance-applications/130-ibm-jdbc-config/templates/00-presync-create-db2-user_Job.yaml new file mode 100644 index 000000000..7b34a7a30 --- /dev/null +++ b/instance-applications/130-ibm-jdbc-config/templates/00-presync-create-db2-user_Job.yaml @@ -0,0 +1,302 @@ +{{- if eq .Values.jdbc_type "incluster-db2" }} + +--- + +# We need to embed the supplied JDBC creds in a K8S secret so we can safely access them from +# the Job without exposing them via the ArgoCD resources +# NOTE: do not want to use the actual usersupplied jdbc creds secret as I'd have to annotate that +# to be a presync hook which will cause ArgoCD to delete it and could cause problems; safer just to create a dedicated secret for the presync hook. 
+# (alternatively could look up the secret via SM in the Job script but it's better this way as we use the actual credentials set in the chart values) +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: "presync-creds-jdbccfg-{{ .Values.mas_config_name }}" + namespace: "mas-{{ .Values.instance_id }}-core" + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +stringData: + username: "{{ .Values.jdbc_instance_username }}" + password: "{{ .Values.jdbc_instance_password }}" + +--- +# Service account that is authorized to read k8s secrets (needed by the job) +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "presync-sa-jdbccfg-{{ .Values.mas_config_name }}" + namespace: "mas-{{ .Values.instance_id }}-core" + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "presync-jdbccfg-sa-rb-{{ .Values.mas_config_name }}" + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: "presync-sa-jdbccfg-{{ .Values.mas_config_name }}" + namespace: "mas-{{ .Values.instance_id }}-core" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + + +--- + +# Permit outbound communication by the Job pods +# (Needed to communicate with the K8S HTTP API and AWS SM) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: "presync-jdbccfg-np-{{ .Values.mas_config_name }}" + namespace: "mas-{{ .Values.instance_id }}-core" + annotations: + 
argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + app: "presync-jdbc-crt-user-{{ .Values.mas_config_name }}" + egress: + - {} + policyTypes: + - Egress + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "presync-jdbc-crt-user-{{ .Values.mas_config_name }}" + namespace: "mas-{{ .Values.instance_id }}-core" + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: "presync-jdbc-crt-user-{{ .Values.mas_config_name }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? + # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: DB2_NAMESPACE + value: "{{ .Values.db2_namespace }}" + - name: DB2_INSTANCE_NAME + value: "{{ .Values.jdbc_instance_name }}" + - name: DB2_DBNAME + value: "{{ .Values.db2_dbname }}" + volumeMounts: + - name: db2-credentials + mountPath: /etc/mas/creds/db2-credentials + command: + - /bin/sh + - -c + - | + + set -e + + source /mascli/functions/gitops_utils + + export DB2_CREDENTIALS_SECRET_PATH="/etc/mas/creds/db2-credentials" + + echo "" + echo "================================================================================" + echo "Settings" + echo "================================================================================" + echo "DB2_NAMESPACE ....................... 
${DB2_NAMESPACE}" + echo "DB2_INSTANCE_NAME ................... ${DB2_INSTANCE_NAME}" + echo "DB2_DBNAME .......................... ${DB2_DBNAME}" + echo "DB2_CREDENTIALS_SECRET_PATH ......... ${DB2_CREDENTIALS_SECRET_PATH}" + + echo "" + echo "================================================================================" + echo "Checking DB2 CRD db2uclusters.db2u.databases.ibm.com is ready (retries every ~10 seconds for ~5 minutes)" + echo "================================================================================" + + # wait till CRD db2uclusters.db2u.databases.ibm.com NamesAccepted=True STARTS + wait_period=0 + while true; do + export DB2_CRD_NAMES_ACCEPTED_STATUS=$(oc get crd db2uclusters.db2u.databases.ibm.com -o=jsonpath="{.status.conditions[?(@.type=='NamesAccepted')].status}") + echo "DB2_CRD_NAMES_ACCEPTED_STATUS .... ${DB2_CRD_NAMES_ACCEPTED_STATUS}" + if [[ "$DB2_CRD_NAMES_ACCEPTED_STATUS" == "True" ]]; then + break + fi + + wait_period=$(($wait_period+10)) + if [ $wait_period -gt 300 ];then + echo "CRD db2uclusters.db2u.databases.ibm.com is not ready with in 300 sec, exiting" + exit 1 + else + echo "CRD db2uclusters.db2u.databases.ibm.com is not ready, trying again in 10 seconds" + sleep 10 + fi + done + # wait till CRD db2uclusters.db2u.databases.ibm.com NamesAccepted=True DONE + + echo "" + echo "================================================================================" + echo "Reading LDAP credentials from ${DB2_CREDENTIALS_SECRET_PATH}" + echo "================================================================================" + export DB2_LDAP_USERNAME=$(cat /etc/mas/creds/db2-credentials/username) + export DB2_LDAP_PASSWORD=$(cat /etc/mas/creds/db2-credentials/password) + + echo "" + echo "================================================================================" + echo "Checking if ${DB2_LDAP_USERNAME} user exists already" + echo "================================================================================" + + echo "" + 
echo "Looking up name of DB2 db2u pod" + echo "--------------------------------------------------------------------------------" + export DB2_DB2U_POD_NAME=$(oc get pods -o custom-columns=POD:.metadata.name -l app=${DB2_INSTANCE_NAME},role=db -n ${DB2_NAMESPACE} --no-headers) + if [[ -z "${DB2_DB2U_POD_NAME}" ]]; then + echo "Failed to look up DB2 db2u pod name" + exit 1 + fi + echo "DB2_DB2U_POD_NAME .......................... ${DB2_DB2U_POD_NAME}" + + + echo "" + echo "Executing command on DB2 db2u pod; su -lc \"id ${DB2_LDAP_USERNAME}\"" + echo "--------------------------------------------------------------------------------" + # Using the || syntax to avoid surfacing a non 0 rc and exitting the job (without having to disable set -e) + DB2_USER_FOUND=true + oc exec -it -n ${DB2_NAMESPACE} ${DB2_DB2U_POD_NAME} -- su -lc "id ${DB2_LDAP_USERNAME}" || DB2_USER_FOUND=false + if [[ "${DB2_USER_FOUND}" == "true" ]]; then + echo "DB2 user exists already, exiting now" + exit 0 + fi + echo "DB2 user does not exist yet" + + echo "" + echo "================================================================================" + echo "Creating user ${DB2_LDAP_USERNAME}" + echo "================================================================================" + + echo "" + echo "Looking up name of DB2 LDAP pod" + echo "--------------------------------------------------------------------------------" + export DB2_LDAP_POD_NAME=$(oc get pods -o custom-columns=POD:.metadata.name -l app=${DB2_INSTANCE_NAME},role=ldap -n ${DB2_NAMESPACE} --no-headers) + if [[ -z "${DB2_LDAP_POD_NAME}" ]]; then + echo "Failed to look up DB2 LDAP pod name" + exit 1 + fi + echo "DB2_LDAP_POD_NAME .......................... 
${DB2_LDAP_POD_NAME}" + + + echo "" + echo "Executing addLdapUser.py script in ${DB2_LDAP_POD_NAME} pod" + echo "--------------------------------------------------------------------------------" + oc exec -it -n ${DB2_NAMESPACE} ${DB2_LDAP_POD_NAME} -- /opt/ibm/ldap_scripts/addLdapUser.py -u ${DB2_LDAP_USERNAME} -r admin -p ${DB2_LDAP_PASSWORD} + echo "..... rc $?" + + echo "" + echo "Verifying that ${DB2_LDAP_USERNAME} can authenticate with ${DB2_DBNAME} database (retries every ~10 seconds for max ~5 minutes)" + echo "--------------------------------------------------------------------------------" + wait_period=0 + while true; do + + # Using the || syntax to avoid surfacing a non 0 rc and exitting the job (without having to disable set -e) + AUTH_SUCCESS=true + oc exec -it c-${DB2_INSTANCE_NAME}-db2u-0 -n ${DB2_NAMESPACE} -c db2u -- su -lc "db2 connect to ${DB2_DBNAME} user ${DB2_LDAP_USERNAME} using ${DB2_LDAP_PASSWORD}" db2inst1 || AUTH_SUCCESS=false + if [[ "${AUTH_SUCCESS}" == "true" ]]; then + echo "Authenticated successfully" + break + fi + echo "... failed to authenticate this time" + + wait_period=$(($wait_period+10)) + if [ $wait_period -gt 300 ]; then + echo "... >5 minutes have elapsed, failing job." + exit 1 + else + echo "... 
retrying in 10 seconds" + sleep 10 + fi + done + + + echo "" + echo "================================================================================" + echo "Wait for c-${DB2_INSTANCE_NAME}-db2u Statefulset to report ready (retries every ~10 seconds for max ~5 minutes)" + echo "================================================================================" + wait_period=0 + while true; do + + export READY_REPLICAS=$(oc get statefulset c-${DB2_INSTANCE_NAME}-db2u -n ${DB2_NAMESPACE} -o=jsonpath="{.status.readyReplicas}") + export REPLICAS=$(oc get statefulset c-${DB2_INSTANCE_NAME}-db2u -n ${DB2_NAMESPACE} -o=jsonpath="{.status.replicas}") + echo "${READY_REPLICAS}/${REPLICAS} replicas are ready" + + if [[ ${READY_REPLICAS} -ge 1 && ${REPLICAS} -ge 1 && ${READY_REPLICAS} -eq ${REPLICAS} ]]; then + echo "Statefulset c-${DB2_INSTANCE_NAME}-db2u is ready" + break + fi + + wait_period=$(($wait_period+10)) + if [ $wait_period -gt 300 ]; then + echo "... >5 minutes have elapsed, failing job." + exit 1 + else + echo "... retrying in 10 seconds" + sleep 10 + fi + done + + restartPolicy: Never + + # TODO: is this the correct SA to use here? 
+ # No, probably want to add a more restricted SA that can just do things that these post-sync jobs need to do + serviceAccountName: "presync-sa-jdbccfg-{{ .Values.mas_config_name }}" + volumes: + - name: db2-credentials + secret: + secretName: "presync-creds-jdbccfg-{{ .Values.mas_config_name }}" + defaultMode: 420 + optional: false + backoffLimit: 4 + +{{- end }} diff --git a/instance-applications/130-ibm-jdbc-config/templates/01-suite_jdbccfg.yaml b/instance-applications/130-ibm-jdbc-config/templates/01-suite_jdbccfg.yaml new file mode 100644 index 000000000..96bba0021 --- /dev/null +++ b/instance-applications/130-ibm-jdbc-config/templates/01-suite_jdbccfg.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: "{{ .Values.mas_config_name }}-credentials" + namespace: "mas-{{ .Values.instance_id }}-core" + annotations: + argocd.argoproj.io/sync-wave: "150" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +stringData: + username: "{{ .Values.jdbc_instance_username }}" + password: "{{ .Values.jdbc_instance_password }}" +--- +apiVersion: config.mas.ibm.com/v1 +kind: JdbcCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: "mas-{{ .Values.instance_id }}-core" + annotations: + argocd.argoproj.io/sync-wave: "151" + labels: +{{- if eq .Values.mas_config_scope "system" }} +{{ .Values.system_suite_jdbccfg_labels | toYaml | indent 4 }} +{{ end }} +{{- if eq .Values.mas_config_scope "ws" }} +{{ .Values.ws_suite_jdbccfg_labels | toYaml | indent 4 }} +{{ end }} +{{- if eq .Values.mas_config_scope "app" }} +{{ .Values.app_suite_jdbccfg_labels | toYaml | indent 4 }} +{{ end }} +{{- if eq .Values.mas_config_scope "wsapp" }} +{{ .Values.wsapp_suite_jdbccfg_labels | toYaml | indent 4 }} +{{ end }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + displayName: "{{ .Values.mas_config_name }}" + config: + url: "{{ 
.Values.jdbc_connection_url }}" + sslEnabled: true + credentials: + secretName: "{{ .Values.mas_config_name }}-credentials" + certificates: + - alias: "{{ .Values.jdbc_instance_name | lower }}" +{{ .Values.jdbc_ca_pem | toYaml | indent 6 }} diff --git a/instance-applications/130-ibm-jdbc-config/templates/postdelete-delete-cr.yaml b/instance-applications/130-ibm-jdbc-config/templates/postdelete-delete-cr.yaml new file mode 100644 index 000000000..9c5a819b6 --- /dev/null +++ b/instance-applications/130-ibm-jdbc-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,122 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: 
CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-jdbc-config/values.yaml b/instance-applications/130-ibm-jdbc-config/values.yaml new file mode 100644 index 000000000..0defed672 --- /dev/null +++ b/instance-applications/130-ibm-jdbc-config/values.yaml @@ -0,0 +1,2 @@ +--- +ibm_entitlement_key: xxx diff --git a/instance-applications/130-ibm-kafka-config/Chart.yaml b/instance-applications/130-ibm-kafka-config/Chart.yaml new file mode 100644 index 000000000..850499668 --- /dev/null +++ b/instance-applications/130-ibm-kafka-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-kafka-config +description: Kafka Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-kafka-config/README.md b/instance-applications/130-ibm-kafka-config/README.md new file mode 100644 index 000000000..7c8649cc6 --- /dev/null +++ b/instance-applications/130-ibm-kafka-config/README.md @@ -0,0 +1,6 @@ +Kafka Configuration for MAS Core Platform +=============================================================================== +Create a KafkaCfg CR instance and associated credentials secret for use by MAS. + + +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). 
\ No newline at end of file diff --git a/instance-applications/130-ibm-kafka-config/templates/01-kafka-credentials_Secret.yaml b/instance-applications/130-ibm-kafka-config/templates/01-kafka-credentials_Secret.yaml new file mode 100644 index 000000000..2ce83e446 --- /dev/null +++ b/instance-applications/130-ibm-kafka-config/templates/01-kafka-credentials_Secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.suite_kafka_username }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.kafka_config.config.credentials.secretName }}" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "150" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + username: "{{ .Values.suite_kafka_username }}" + password: "{{ .Values.suite_kafka_password }}" +{{- end }} diff --git a/instance-applications/130-ibm-kafka-config/templates/02-ibm-mas_KafkaCfg.yaml b/instance-applications/130-ibm-kafka-config/templates/02-ibm-mas_KafkaCfg.yaml new file mode 100644 index 000000000..123d8341a --- /dev/null +++ b/instance-applications/130-ibm-kafka-config/templates/02-ibm-mas_KafkaCfg.yaml @@ -0,0 +1,21 @@ +{{- if .Values.kafka_config }} +--- +apiVersion: config.mas.ibm.com/v1 +kind: KafkaCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "151" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: "MSK Kafka - msk-{{ .Values.instance_id }}" + type: external +{{ .Values.kafka_config | toYaml | indent 2 }} +{{- end }} diff --git a/instance-applications/130-ibm-kafka-config/templates/postdelete-delete-cr.yaml 
b/instance-applications/130-ibm-kafka-config/templates/postdelete-delete-cr.yaml new file mode 100644 index 000000000..a056c32e7 --- /dev/null +++ b/instance-applications/130-ibm-kafka-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,124 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? 
+ # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-kafka-config/values.yaml b/instance-applications/130-ibm-kafka-config/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/130-ibm-kafka-config/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/130-ibm-mas-bas-config/Chart.yaml b/instance-applications/130-ibm-mas-bas-config/Chart.yaml new file mode 100644 index 000000000..5112a1c71 --- /dev/null +++ b/instance-applications/130-ibm-mas-bas-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-bas-config +description: BAS Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-mas-bas-config/README.md b/instance-applications/130-ibm-mas-bas-config/README.md new file mode 100644 index 000000000..e7fff530d --- /dev/null +++ b/instance-applications/130-ibm-mas-bas-config/README.md @@ -0,0 +1,5 @@ +BAS Configuration for MAS Core Platform +=============================================================================== +Create a BasCfg CR instance and associated credentials secret for use by MAS. + +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). 
\ No newline at end of file diff --git a/instance-applications/130-ibm-mas-bas-config/templates/01-dro-api-token_Secret.yaml b/instance-applications/130-ibm-mas-bas-config/templates/01-dro-api-token_Secret.yaml new file mode 100644 index 000000000..e929750a6 --- /dev/null +++ b/instance-applications/130-ibm-mas-bas-config/templates/01-dro-api-token_Secret.yaml @@ -0,0 +1,15 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.mas_config_name }}-credentials" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "131" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + api_key: "{{ .Values.dro_api_token }}" diff --git a/instance-applications/130-ibm-mas-bas-config/templates/03-ibm-mas_BasCfg.yaml b/instance-applications/130-ibm-mas-bas-config/templates/03-ibm-mas_BasCfg.yaml new file mode 100644 index 000000000..025b09c05 --- /dev/null +++ b/instance-applications/130-ibm-mas-bas-config/templates/03-ibm-mas_BasCfg.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: config.mas.ibm.com/v1 +kind: BasCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "134" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: DRO {{ .Values.instance_id }} + config: + url: "{{ .Values.dro_endpoint_url }}" + contact: + email: "{{ .Values.dro_contact.email }}" + firstName: "{{ .Values.dro_contact.first_name }}" + lastName: "{{ .Values.dro_contact.last_name }}" + credentials: + secretName: "{{ .Values.mas_config_name }}-credentials" +{{- if .Values.mas_segment_key }} + segmentKey: {{ .Values.mas_segment_key }} +{{- end }} + certificates: + - alias: 
ca +{{ .Values.dro_ca | toYaml | indent 6 }} diff --git a/instance-applications/130-ibm-mas-bas-config/templates/postdelete-delete-cr.yaml b/instance-applications/130-ibm-mas-bas-config/templates/postdelete-delete-cr.yaml new file mode 100644 index 000000000..a056c32e7 --- /dev/null +++ b/instance-applications/130-ibm-mas-bas-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,124 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? 
+ # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-mas-bas-config/values.yaml b/instance-applications/130-ibm-mas-bas-config/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/130-ibm-mas-bas-config/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/130-ibm-mas-idp-config/Chart.yaml b/instance-applications/130-ibm-mas-idp-config/Chart.yaml new file mode 100644 index 000000000..e09d50e8b --- /dev/null +++ b/instance-applications/130-ibm-mas-idp-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-idp-config +description: IDP Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-mas-idp-config/README.md b/instance-applications/130-ibm-mas-idp-config/README.md new file mode 100644 index 000000000..a96891736 --- /dev/null +++ b/instance-applications/130-ibm-mas-idp-config/README.md @@ -0,0 +1,6 @@ +IDP Configuration for MAS Core Platform +=============================================================================== +Create an IdpCfg CR instance and associated credentials secret for use by MAS. +Currently only supports LDAP. + +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). 
\ No newline at end of file diff --git a/instance-applications/130-ibm-mas-idp-config/templates/01-ldap-credentials_Secret.yaml b/instance-applications/130-ibm-mas-idp-config/templates/01-ldap-credentials_Secret.yaml new file mode 100644 index 000000000..784bbc364 --- /dev/null +++ b/instance-applications/130-ibm-mas-idp-config/templates/01-ldap-credentials_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.mas_config_name }}-credentials" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "150" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + bindDN: "{{ .Values.suite_ldap_bind_dn }}" + bindPassword: "{{ .Values.suite_ldap_bind_password }}" diff --git a/instance-applications/130-ibm-mas-idp-config/templates/02-ibm-mas_IdpCfg.yaml b/instance-applications/130-ibm-mas-idp-config/templates/02-ibm-mas_IdpCfg.yaml new file mode 100644 index 000000000..081f14991 --- /dev/null +++ b/instance-applications/130-ibm-mas-idp-config/templates/02-ibm-mas_IdpCfg.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: config.mas.ibm.com/v1 +kind: IDPCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" + mas.ibm.com/configId: default +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "151" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: "{{ .Values.suite_ldap_display_name }}" + ldap: + url: {{ .Values.suite_ldap_url }} + baseDN: {{ .Values.suite_ldap_basedn }} + userIdMap: {{ .Values.suite_ldap_userid_map }} + credentials: + secretName: "{{ .Values.mas_config_name }}-credentials" + certificates: + - alias: ldapca +{{ .Values.suite_ldap_certificate | 
toYaml | indent 6 }} diff --git a/instance-applications/130-ibm-mas-idp-config/templates/postdelete-delete-cr.yaml b/instance-applications/130-ibm-mas-idp-config/templates/postdelete-delete-cr.yaml new file mode 100644 index 000000000..a056c32e7 --- /dev/null +++ b/instance-applications/130-ibm-mas-idp-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,124 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? 
+ # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-mas-idp-config/values.yaml b/instance-applications/130-ibm-mas-idp-config/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/130-ibm-mas-idp-config/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/130-ibm-mas-mongo-config/Chart.yaml b/instance-applications/130-ibm-mas-mongo-config/Chart.yaml new file mode 100644 index 000000000..eb792a7cd --- /dev/null +++ b/instance-applications/130-ibm-mas-mongo-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-mongo-config +description: Mongo Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-mas-mongo-config/README.md b/instance-applications/130-ibm-mas-mongo-config/README.md new file mode 100644 index 000000000..ce83781b7 --- /dev/null +++ b/instance-applications/130-ibm-mas-mongo-config/README.md @@ -0,0 +1,5 @@ +Mongo Configuration for MAS Core Platform +=============================================================================== +Create a MongoCfg CR instance and associated credentials secret for use by MAS. + +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). 
\ No newline at end of file diff --git a/instance-applications/130-ibm-mas-mongo-config/templates/01-mongo-credentials_Secret.yaml b/instance-applications/130-ibm-mas-mongo-config/templates/01-mongo-credentials_Secret.yaml new file mode 100644 index 000000000..af809e5d0 --- /dev/null +++ b/instance-applications/130-ibm-mas-mongo-config/templates/01-mongo-credentials_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.config.credentials.secretName }}" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "131" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + username: "{{ .Values.username }}" + password: "{{ .Values.password }}" diff --git a/instance-applications/130-ibm-mas-mongo-config/templates/02-ibm-mas_MongoCfg.yaml b/instance-applications/130-ibm-mas-mongo-config/templates/02-ibm-mas_MongoCfg.yaml new file mode 100644 index 000000000..2560e59a0 --- /dev/null +++ b/instance-applications/130-ibm-mas-mongo-config/templates/02-ibm-mas_MongoCfg.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: config.mas.ibm.com/v1 +kind: MongoCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "134" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: IBM Cloud Databases for Mongo + type: external + config: +{{ .Values.config | toYaml | indent 4 }} + certificates: +{{ .Values.certificates | toYaml | indent 4 }} diff --git a/instance-applications/130-ibm-mas-mongo-config/templates/postdelete-delete-cr.yaml b/instance-applications/130-ibm-mas-mongo-config/templates/postdelete-delete-cr.yaml new file mode 
100644 index 000000000..a056c32e7 --- /dev/null +++ b/instance-applications/130-ibm-mas-mongo-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,124 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? 
+ # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-mas-mongo-config/values.yaml b/instance-applications/130-ibm-mas-mongo-config/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/130-ibm-mas-mongo-config/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/130-ibm-mas-sls-config/Chart.yaml b/instance-applications/130-ibm-mas-sls-config/Chart.yaml new file mode 100644 index 000000000..59866eb8f --- /dev/null +++ b/instance-applications/130-ibm-mas-sls-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-sls-config +description: SLS Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-mas-sls-config/README.md b/instance-applications/130-ibm-mas-sls-config/README.md new file mode 100644 index 000000000..e6c9314de --- /dev/null +++ b/instance-applications/130-ibm-mas-sls-config/README.md @@ -0,0 +1,5 @@ +SLS Configuration for MAS Core Platform +=============================================================================== +Create a SlsCfg CR instance and associated credentials secret for use by MAS. + +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). 
\ No newline at end of file diff --git a/instance-applications/130-ibm-mas-sls-config/templates/04-sls-registration_Secret.yaml b/instance-applications/130-ibm-mas-sls-config/templates/04-sls-registration_Secret.yaml new file mode 100644 index 000000000..3fbea22af --- /dev/null +++ b/instance-applications/130-ibm-mas-sls-config/templates/04-sls-registration_Secret.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: "{{ .Values.mas_config_name }}-credentials" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "131" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +stringData: + registrationKey: "{{ .Values.registration_key }}" diff --git a/instance-applications/130-ibm-mas-sls-config/templates/05-ibm-mas_SlsCfg.yaml b/instance-applications/130-ibm-mas-sls-config/templates/05-ibm-mas_SlsCfg.yaml new file mode 100644 index 000000000..da394359c --- /dev/null +++ b/instance-applications/130-ibm-mas-sls-config/templates/05-ibm-mas_SlsCfg.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: config.mas.ibm.com/v1 +kind: SlsCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "134" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: SLS + config: + url: "{{ .Values.url }}" + credentials: + secretName: "{{ .Values.mas_config_name }}-credentials" + certificates: + - alias: ca +{{ .Values.ca | toYaml | indent 6 }} diff --git a/instance-applications/130-ibm-mas-sls-config/templates/postdelete-delete-cr.yaml b/instance-applications/130-ibm-mas-sls-config/templates/postdelete-delete-cr.yaml new file mode 100644 index 000000000..a056c32e7 
--- /dev/null +++ b/instance-applications/130-ibm-mas-sls-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,124 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? 
+ # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-mas-sls-config/values.yaml b/instance-applications/130-ibm-mas-sls-config/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/instance-applications/130-ibm-mas-sls-config/values.yaml @@ -0,0 +1 @@ +--- diff --git a/instance-applications/130-ibm-mas-smtp-config/Chart.yaml b/instance-applications/130-ibm-mas-smtp-config/Chart.yaml new file mode 100644 index 000000000..e210566cf --- /dev/null +++ b/instance-applications/130-ibm-mas-smtp-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-smtp-config +description: Smtp Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-mas-smtp-config/README.md b/instance-applications/130-ibm-mas-smtp-config/README.md new file mode 100644 index 000000000..1a168ad2b --- /dev/null +++ b/instance-applications/130-ibm-mas-smtp-config/README.md @@ -0,0 +1,5 @@ +SMTP Configuration for MAS Core Platform +=============================================================================== +Create a SmtpCfg CR instance and associated credentials secret for use by MAS. + +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). 
\ No newline at end of file diff --git a/instance-applications/130-ibm-mas-smtp-config/templates/01-smtp-credentials_Secret.yaml b/instance-applications/130-ibm-mas-smtp-config/templates/01-smtp-credentials_Secret.yaml new file mode 100644 index 000000000..54dfb2135 --- /dev/null +++ b/instance-applications/130-ibm-mas-smtp-config/templates/01-smtp-credentials_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.mas_config_name }}-credentials" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "150" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + username: "{{ .Values.suite_smtp_username }}" + password: "{{ .Values.suite_smtp_password }}" diff --git a/instance-applications/130-ibm-mas-smtp-config/templates/02-ibm-mas_SmtpCfg.yaml b/instance-applications/130-ibm-mas-smtp-config/templates/02-ibm-mas_SmtpCfg.yaml new file mode 100644 index 000000000..976d13519 --- /dev/null +++ b/instance-applications/130-ibm-mas-smtp-config/templates/02-ibm-mas_SmtpCfg.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: config.mas.ibm.com/v1 +kind: SmtpCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "151" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: "{{ .Values.suite_smtp_display_name }}" + config: + hostname: {{ .Values.suite_smtp_host }} + port: {{ .Values.suite_smtp_port }} + security: {{ .Values.suite_smtp_security }} + authentication: {{ .Values.suite_smtp_authentication }} + defaultSenderEmail: {{ .Values.suite_smtp_default_sender_email }} + defaultSenderName: {{ 
.Values.suite_smtp_default_sender_name }} + defaultRecipientEmail: {{ .Values.suite_smtp_default_recipient_email }} + defaultShouldEmailPasswords: {{ .Values.suite_smtp_default_should_email_passwords }} + credentials: + secretName: "{{ .Values.mas_config_name }}-credentials" diff --git a/instance-applications/130-ibm-mas-smtp-config/templates/postdelete-delete-cr.yaml b/instance-applications/130-ibm-mas-smtp-config/templates/postdelete-delete-cr.yaml new file mode 100644 index 000000000..a056c32e7 --- /dev/null +++ b/instance-applications/130-ibm-mas-smtp-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,124 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? 
+ # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-mas-smtp-config/values.yaml b/instance-applications/130-ibm-mas-smtp-config/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/130-ibm-mas-smtp-config/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/130-ibm-mas-suite/Chart.yaml b/instance-applications/130-ibm-mas-suite/Chart.yaml new file mode 100644 index 000000000..956777838 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-suite +description: MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-mas-suite/README.md b/instance-applications/130-ibm-mas-suite/README.md new file mode 100644 index 000000000..0fe50457e --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/README.md @@ -0,0 +1,3 @@ +MAS Core Platform +=============================================================================== +Installs the `ibm-mas` operator and creates an instance of the `Suite`. 
diff --git a/instance-applications/130-ibm-mas-suite/templates/00-1-ibm-cis-webhook_rbac.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-1-ibm-cis-webhook_rbac.yml.j2 new file mode 100644 index 000000000..7509edacf --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/00-1-ibm-cis-webhook_rbac.yml.j2 @@ -0,0 +1,163 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} + +{{ $cert_manager_namespace := "cert-manager" }} +{{ $cert_manager_service_account := "cert-manager" }} +{{ $cis_apiservice_group_name := "acme.cis.ibm.com" }} + + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + argocd.argoproj.io/sync-wave: "130" + name: "cert-manager-webhook-ibm-cis" + namespace: "{{ $cert_manager_namespace }}" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +# Grant the webhook permission to read the ConfigMap containing the Kubernetes +# apiserver's requestheader-ca-certificate. +# This ConfigMap is automatically created by the Kubernetes apiserver. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + argocd.argoproj.io/sync-wave: "131" + namespace: "{{ $cert_manager_namespace }}" + name: "cert-manager-webhook-ibm-cis" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "cert-manager-webhook-ibm-cis" +subjects: + - apiGroup: "" + kind: ServiceAccount + name: "cert-manager-webhook-ibm-cis" + namespace: {{ $cert_manager_namespace }} + +--- +# Grant cert-manager permission to validate using our apiserver +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + argocd.argoproj.io/sync-wave: "130" + name: "cert-manager-webhook-ibm-cis" + namespace: "{{ $cert_manager_namespace }}" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - apiGroups: + - '' + resources: + - 'secrets' + verbs: + - 'get' + +--- +# Grant the webhook permission to read the ConfigMap containing the Kubernetes +# apiserver's requestheader-ca-certificate. +# This ConfigMap is automatically created by the Kubernetes apiserver. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + argocd.argoproj.io/sync-wave: "131" + name: "cert-manager-webhook-ibm-cis:webhook-authentication-reader" + namespace: kube-system + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - apiGroup: "" + kind: ServiceAccount + name: "cert-manager-webhook-ibm-cis" + namespace: "{{ $cert_manager_namespace }}" + +--- +# apiserver gets the auth-delegator role to delegate auth decisions to +# the core apiserver +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + argocd.argoproj.io/sync-wave: "131" + name: "cert-manager-webhook-ibm-cis:auth-delegator" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - apiGroup: "" + kind: ServiceAccount + name: "cert-manager-webhook-ibm-cis" + namespace: "{{ $cert_manager_namespace }}" + +--- +# Grant cert-manager permission to validate using our apiserver +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + argocd.argoproj.io/sync-wave: "130" + name: "cert-manager-webhook-ibm-cis:domain-solver" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - apiGroups: + - "{{ $cis_apiservice_group_name }}" + resources: + - '*' + verbs: + - 'create' + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + argocd.argoproj.io/sync-wave: "131" + name: "cert-manager-webhook-ibm-cis:domain-solver" + labels: + app: 
"cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "cert-manager-webhook-ibm-cis:domain-solver" +subjects: + - apiGroup: "" + kind: ServiceAccount + name: "{{ $cert_manager_service_account }}" + namespace: "{{ $cert_manager_namespace }}" + +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/00-2-ibm-cis-webhook_pki.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-2-ibm-cis-webhook_pki.yml.j2 new file mode 100644 index 000000000..639358f6b --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/00-2-ibm-cis-webhook_pki.yml.j2 @@ -0,0 +1,87 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} + +{{ $cert_manager_namespace := "cert-manager" }} + +--- +# Create a selfsigned Issuer, in order to create a root CA certificate for +# signing webhook serving certificates +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + annotations: + argocd.argoproj.io/sync-wave: "132" + name: "cert-manager-webhook-ibm-cis-self-signed-issuer" + namespace: "{{ $cert_manager_namespace }}" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + selfSigned: {} + +--- +# Generate a CA Certificate used to sign certificates for the webhook +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + annotations: + argocd.argoproj.io/sync-wave: "133" + name: "cert-manager-webhook-ibm-cis-root-ca-certificate" + namespace: "{{ $cert_manager_namespace }}" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + secretName: "cert-manager-webhook-ibm-cis-root-ca-certificate" + duration: 43800h0m0s # 5y + issuerRef: + name: "cert-manager-webhook-ibm-cis-self-signed-issuer" 
+ commonName: "ca.cert-manager-webhook-ibm-cis.cert-manager" + isCA: true + +--- +# Create an Issuer that uses the above generated CA certificate to issue certs +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + annotations: + argocd.argoproj.io/sync-wave: "132" + name: "cert-manager-webhook-ibm-cis-root-ca-issuer" + namespace: "{{ $cert_manager_namespace }}" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + ca: + secretName: "cert-manager-webhook-ibm-cis-root-ca-certificate" + +--- +# Finally, generate a serving certificate for the webhook to use +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + annotations: + argocd.argoproj.io/sync-wave: "133" + name: "cert-manager-webhook-ibm-cis-serving-cert" + namespace: "{{ $cert_manager_namespace }}" + labels: + app: "cert-manager-webhook-ibm-cis.name" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + secretName: "cert-manager-webhook-ibm-cis-serving-cert" + duration: 8760h0m0s # 1y + issuerRef: + name: "cert-manager-webhook-ibm-cis-root-ca-issuer" + dnsNames: + - "cert-manager-webhook-ibm-cis" + - "cert-manager-webhook-ibm-cis.{{ $cert_manager_namespace }}" + - "cert-manager-webhook-ibm-cis.{{ $cert_manager_namespace }}.svc" + +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/00-3-ibm-cis-webhook_deployment.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-3-ibm-cis-webhook_deployment.yml.j2 new file mode 100644 index 000000000..5c41816e7 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/00-3-ibm-cis-webhook_deployment.yml.j2 @@ -0,0 +1,73 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} + +{{ $cis_apiservice_group_name := "acme.cis.ibm.com" }} +{{ $cis_webhook_image_repository := "quay.io/ibmmas/cert-manager-webhook-ibm-cis" }} +{{ 
$cis_webhook_image_tag := "1.0.0" }} +{{ $cis_webhook_image_pullpolicy := "Always" }} +{{ $cis_webhook_service_type := "ClusterIP" }} +{{ $cis_webhook_service_port := "443" }} +{{ $cis_webhook_log_level := "1" }} +{{ $cert_manager_namespace := "cert-manager" }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + argocd.argoproj.io/sync-wave: "134" + name: "cert-manager-webhook-ibm-cis" + namespace: "{{ $cert_manager_namespace }}" + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + replicas: 1 + selector: + matchLabels: + app: "cert-manager-webhook-ibm-cis" + template: + metadata: + labels: + app: "cert-manager-webhook-ibm-cis" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + serviceAccountName: "cert-manager-webhook-ibm-cis" + containers: + - name: webhook + image: "{{ $cis_webhook_image_repository }}:{{ $cis_webhook_image_tag }}" + imagePullPolicy: {{ $cis_webhook_image_pullpolicy }} + args: + - --tls-cert-file=/tls/tls.crt + - --tls-private-key-file=/tls/tls.key + - --v={{ $cis_webhook_log_level }} + env: + - name: GROUP_NAME + value: "{{ $cis_apiservice_group_name }}" + ports: + - name: https + containerPort: 443 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTPS + path: /healthz + port: https + readinessProbe: + httpGet: + scheme: HTTPS + path: /healthz + port: https + volumeMounts: + - name: certs + mountPath: /tls + readOnly: true + resources: + volumes: + - name: certs + secret: + secretName: "cert-manager-webhook-ibm-cis-serving-cert" + +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/00-4-ibm-cis-webhook_apiservice.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-4-ibm-cis-webhook_apiservice.yml.j2 new file mode 100644 index 000000000..76d64e120 --- /dev/null +++ 
b/instance-applications/130-ibm-mas-suite/templates/00-4-ibm-cis-webhook_apiservice.yml.j2
@@ -0,0 +1,29 @@
+{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }}
+
+{{ $cert_manager_namespace := "cert-manager" }}
+{{ $cis_apiservice_group_name := "acme.cis.ibm.com" }}
+
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+  annotations:
+    argocd.argoproj.io/sync-wave: "135"
+    cert-manager.io/inject-ca-from: "{{ $cert_manager_namespace }}/cert-manager-webhook-ibm-cis-serving-cert"
+  name: "v1alpha1.{{ $cis_apiservice_group_name }}"
+  namespace: "{{ $cert_manager_namespace }}"
+  labels:
+    app: "cert-manager-webhook-ibm-cis"
+{{- if .Values.custom_labels }}
+{{ .Values.custom_labels | toYaml | indent 4 }}
+{{- end }}
+spec:
+  group: "{{ $cis_apiservice_group_name }}"
+  groupPriorityMinimum: 1000
+  versionPriority: 15
+  service:
+    name: cert-manager-webhook-ibm-cis
+    namespace: "{{ $cert_manager_namespace }}"
+  version: v1alpha1
+
+{{- end }}
diff --git a/instance-applications/130-ibm-mas-suite/templates/00-5-ibm-cis-webhook_service.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-5-ibm-cis-webhook_service.yml.j2
new file mode 100644
index 000000000..80fd80b39
--- /dev/null
+++ b/instance-applications/130-ibm-mas-suite/templates/00-5-ibm-cis-webhook_service.yml.j2
@@ -0,0 +1,31 @@
+{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }}
+
+{{ $cis_webhook_service_type := "ClusterIP" }}
+{{ $cis_webhook_service_port := "443" }}
+{{ $cert_manager_namespace := "cert-manager" }}
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    argocd.argoproj.io/sync-wave: "136"
+  name: "cert-manager-webhook-ibm-cis"
+  namespace: "{{ $cert_manager_namespace }}"
+  labels:
+    app: "cert-manager-webhook-ibm-cis"
+{{- if .Values.custom_labels }}
+{{ .Values.custom_labels | toYaml | indent 4 }}
+{{- end }}
+spec:
+  type: "{{ $cis_webhook_service_type }}"
+  ports:
+    
- port: {{ $cis_webhook_service_port }} + targetPort: https + protocol: TCP + name: https + selector: + app: "cert-manager-webhook-ibm-cis" + + +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/00-6-ibm-cis-webhook_cis-apikey-secret.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-6-ibm-cis-webhook_cis-apikey-secret.yml.j2 new file mode 100644 index 000000000..64994d427 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/00-6-ibm-cis-webhook_cis-apikey-secret.yml.j2 @@ -0,0 +1,21 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} + +{{ $cert_manager_namespace := "cert-manager" }} + +--- +apiVersion: v1 +kind: Secret +metadata: + annotations: + argocd.argoproj.io/sync-wave: "130" + name: cis-api-key + namespace: "{{ $cert_manager_namespace }}" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + key: {{ .Values.cis_apikey }} + +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/00-7-ibm-cis-webhook_cis-proxy-route.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-7-ibm-cis-webhook_cis-proxy-route.yml.j2 new file mode 100644 index 000000000..8373ba4e0 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/00-7-ibm-cis-webhook_cis-proxy-route.yml.j2 @@ -0,0 +1,30 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} + +{{ $cert_manager_namespace := "cert-manager" }} +{{ $ocp_ingress := printf "cis-proxy-route.%s" .Values.ocp_cluster_domain }} + +--- +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + annotations: + argocd.argoproj.io/sync-wave: "137" + name: cis-proxy-route + namespace: "{{ $cert_manager_namespace }}" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + host: "{{ $ocp_ingress }}" + to: + kind: Service + name: 
cert-manager-webhook-ibm-cis + weight: 100 + port: + targetPort: https + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/00-8-ibm-cis_clusterissuer-prod.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-8-ibm-cis_clusterissuer-prod.yml.j2 new file mode 100644 index 000000000..409271700 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/00-8-ibm-cis_clusterissuer-prod.yml.j2 @@ -0,0 +1,35 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} + +{{ $cis_apiservice_group_name := "acme.cis.ibm.com" }} +{{ $cis_prod_issuer_name := printf "%s-cis-le-prod" .Values.instance_id }} +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + annotations: + argocd.argoproj.io/sync-wave: "138" + name: "{{ $cis_prod_issuer_name }}" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + acme: + email: "{{ .Values.cis_email }}" + preferredChain: '' + privateKeySecretRef: + name: cis-letsencrypt-production-account-key + server: 'https://acme-v02.api.letsencrypt.org/directory' + solvers: + - dns01: + webhook: + config: + apiKeySecretRef: + key: key + name: cis-api-key + crn: >- + {{ .Values.cis_crn }} + groupName: {{ $cis_apiservice_group_name }} + solverName: cis + +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/00-8-ibm-cis_clusterissuer-staging.yml.j2 b/instance-applications/130-ibm-mas-suite/templates/00-8-ibm-cis_clusterissuer-staging.yml.j2 new file mode 100644 index 000000000..74ce86b46 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/00-8-ibm-cis_clusterissuer-staging.yml.j2 @@ -0,0 +1,36 @@ +{{- if and (eq .Values.dns_provider "cis") (eq .Values.mas_manual_cert_mgmt "False") }} + +{{ $cis_apiservice_group_name := "acme.cis.ibm.com" }} +{{ $cis_stg_issuer_name := printf 
"%s-cis-le-stg" .Values.instance_id }} + +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + annotations: + argocd.argoproj.io/sync-wave: "138" + name: "{{ $cis_stg_issuer_name }}" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + acme: + email: "{{ .Values.cis_email }}" + preferredChain: '' + privateKeySecretRef: + name: cis-letsencrypt-staging-account-key + server: 'https://acme-staging-v02.api.letsencrypt.org/directory' + solvers: + - dns01: + webhook: + config: + apiKeySecretRef: + key: key + name: cis-api-key + crn: >- + {{ .Values.cis_crn }} + groupName: {{ $cis_apiservice_group_name }} + solverName: cis + +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/01-ibm-mas_OperatorGroup.yaml b/instance-applications/130-ibm-mas-suite/templates/01-ibm-mas_OperatorGroup.yaml new file mode 100644 index 000000000..bde7cac40 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/01-ibm-mas_OperatorGroup.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: ibm-mas-operator-group + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "131" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - "mas-{{ .Values.instance_id }}-core" diff --git a/instance-applications/130-ibm-mas-suite/templates/01-ibm-mas_suite_cert_Secret.yaml b/instance-applications/130-ibm-mas-suite/templates/01-ibm-mas_suite_cert_Secret.yaml new file mode 100644 index 000000000..c1778ce0c --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/01-ibm-mas_suite_cert_Secret.yaml @@ -0,0 +1,24 @@ +{{- if eq .Values.mas_manual_cert_mgmt "True" }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.instance_id }}-cert-public" + 
namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "132" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: +{{- if .Values.ca_cert }} + ca.crt: {{ .Values.ca_cert }} +{{- end }} +{{- if .Values.tls_cert }} + tls.crt: {{ .Values.tls_cert }} +{{- end }} +{{- if .Values.tls_key }} + tls.key: {{ .Values.tls_key }} +{{- end }} +{{- end }} diff --git a/instance-applications/130-ibm-mas-suite/templates/01-postdelete-crs-resources.yaml b/instance-applications/130-ibm-mas-suite/templates/01-postdelete-crs-resources.yaml new file mode 100644 index 000000000..d9c738747 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/01-postdelete-crs-resources.yaml @@ -0,0 +1,99 @@ +{{- /* + A collection of resources for supporting the PostDelete hooks on ibm-mas-*-config applications + They are created in this chart (instead of the config application charts) to: + 1. ensure the resources are not deleted by ArgoCD before ArgoCD runs the PostDelete hook jobs that depend on them + 2. 
reduce code and resource duplication + ( NOTE: this is a workaround for the fact it is not currently possible to annotate these types of resource with + PostDelete due to a bug in ArgoCD: https://github.com/argoproj/argo-cd/issues/17191) and the fact that ArgoCD + does not yet support PreDelete hooks: https://github.com/argoproj/argo-cd/issues/13975, either of which + would offer a preferable solution to this workaround ) +*/}} + + + +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + +--- +# Permit outbound communication by the Job pods +# (Needed to communicate with the K8S HTTP API and AWS SM) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $np_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "131" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + app: {{ $job_label }} + egress: + - {} + policyTypes: + - Egress + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "131" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - delete + - get + - list + - watch + apiGroups: + - "config.mas.ibm.com" + resources: + - "*" + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "131" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + namespace: {{ $ns }} + 
annotations: + argocd.argoproj.io/sync-wave: "132" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $role_name }} diff --git a/instance-applications/130-ibm-mas-suite/templates/02-ibm-mas_Subscription.yaml b/instance-applications/130-ibm-mas-suite/templates/02-ibm-mas_Subscription.yaml new file mode 100644 index 000000000..1f7e5526f --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/02-ibm-mas_Subscription.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-mas-operator + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "133" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.mas_channel }}" + installPlanApproval: Automatic + name: ibm-mas + source: ibm-operator-catalog + sourceNamespace: openshift-marketplace diff --git a/instance-applications/130-ibm-mas-suite/templates/03-ibm-entitlement_Secret.yaml b/instance-applications/130-ibm-mas-suite/templates/03-ibm-entitlement_Secret.yaml new file mode 100644 index 000000000..b856f24e7 --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/03-ibm-entitlement_Secret.yaml @@ -0,0 +1,15 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-entitlement + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "133" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ .Values.ibm_entitlement_key }} diff --git 
a/instance-applications/130-ibm-mas-suite/templates/04-ibm-mas_Suite.yaml b/instance-applications/130-ibm-mas-suite/templates/04-ibm-mas_Suite.yaml new file mode 100644 index 000000000..e22d7a7cd --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/templates/04-ibm-mas_Suite.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: core.mas.ibm.com/v1 +kind: Suite +metadata: + annotations: + argocd.argoproj.io/sync-wave: "139" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.mas_annotations }} +{{ .Values.mas_annotations | toYaml | indent 4 }} +{{- end }} + labels: + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.mas_labels }} +{{ .Values.mas_labels | toYaml | indent 4 }} +{{- end }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + name: "{{ .Values.instance_id }}" + namespace: mas-{{ .Values.instance_id }}-core +spec: + domain: "{{ .Values.domain }}" + certManagerNamespace: {{ .Values.cert_manager_namespace | default "cert-manager" }} +{{- if eq .Values.mas_manual_cert_mgmt "False" }} +{{- if or (eq .Values.dns_provider "akamai") (eq .Values.dns_provider "cis") }} + certificateIssuer: + duration: 8760h0m0s + name: "{{ .Values.instance_id }}-{{ .Values.dns_provider }}-le-prod" + renewBefore: 720h0m0s +{{ end }} +{{ end }} + license: + accept: true + settings: + manualCertMgmt: {{ .Values.mas_manual_cert_mgmt | default false }} + icr: + cp: "{{ .Values.icr_cp }}" + cpopen: "{{ .Values.icr_cp_open }}" +{{- if .Values.mas_image_tags }} + imageTags: +{{ .Values.mas_image_tags | toYaml | indent 6 }} +{{ end }} diff --git a/instance-applications/130-ibm-mas-suite/values.yaml b/instance-applications/130-ibm-mas-suite/values.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/instance-applications/130-ibm-mas-suite/values.yaml @@ -0,0 +1 @@ +--- diff --git a/instance-applications/130-ibm-objectstorage-config/Chart.yaml 
b/instance-applications/130-ibm-objectstorage-config/Chart.yaml new file mode 100644 index 000000000..db061769b --- /dev/null +++ b/instance-applications/130-ibm-objectstorage-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-objectstorage-config +description: Objectstorage Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-objectstorage-config/README.md b/instance-applications/130-ibm-objectstorage-config/README.md new file mode 100644 index 000000000..28aabe82a --- /dev/null +++ b/instance-applications/130-ibm-objectstorage-config/README.md @@ -0,0 +1,5 @@ +ObjectStorage Configuration for MAS Core Platform +=============================================================================== +Create a ObjectStorageCfg CR instance and associated credentials secret for use by MAS. + +Contains a post-delete hook (`postdelete-delete-cr.yaml`) that will ensure the config CR is deleted when the ArgoCD application managing this chart is deleted (this will not happen by default as the config CR is asserted to be owned by the `Suite` CR by the MAS entity managers). 
\ No newline at end of file diff --git a/instance-applications/130-ibm-objectstorage-config/templates/01-objectstorage-credentials_Secret.yaml b/instance-applications/130-ibm-objectstorage-config/templates/01-objectstorage-credentials_Secret.yaml new file mode 100644 index 000000000..ea34794e6 --- /dev/null +++ b/instance-applications/130-ibm-objectstorage-config/templates/01-objectstorage-credentials_Secret.yaml @@ -0,0 +1,18 @@ +{{- if .Values.suite_objectstorage_username }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.objectstorage_config.config.credentials.secretName }}" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "150" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + username: "{{ .Values.suite_objectstorage_username }}" + password: "{{ .Values.suite_objectstorage_password }}" +{{- end }} diff --git a/instance-applications/130-ibm-objectstorage-config/templates/02-ibm-mas_ObjectStorageCfg.yaml b/instance-applications/130-ibm-objectstorage-config/templates/02-ibm-mas_ObjectStorageCfg.yaml new file mode 100644 index 000000000..7b3a0d442 --- /dev/null +++ b/instance-applications/130-ibm-objectstorage-config/templates/02-ibm-mas_ObjectStorageCfg.yaml @@ -0,0 +1,20 @@ +{{- if .Values.objectstorage_config }} +--- +apiVersion: config.mas.ibm.com/v1 +kind: ObjectStorageCfg +metadata: + name: "{{ .Values.mas_config_name }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "151" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: "Cloud ObjectStorage - {{ .Values.instance_id }}" +{{ .Values.objectstorage_config | toYaml | indent 2 }} +{{- end }} 
diff --git a/instance-applications/130-ibm-objectstorage-config/templates/postdelete-delete-cr.yaml b/instance-applications/130-ibm-objectstorage-config/templates/postdelete-delete-cr.yaml new file mode 100644 index 000000000..a056c32e7 --- /dev/null +++ b/instance-applications/130-ibm-objectstorage-config/templates/postdelete-delete-cr.yaml @@ -0,0 +1,124 @@ +{{- if .Values.use_postdelete_hooks }} + +{{ $cr_name := .Values.mas_config_name }} +{{ $cr_kind := .Values.mas_config_kind }} +{{ $cr_api_version := .Values.mas_config_api_version }} + +{{ $job_name := printf "postdelete-delete-cr-job-%s" $cr_name }} + +# NOTE: depends on resources created in ibm-mas-suite chart (01-postdelete-crs-resources) +# The values below must align with the values in that file +{{ $role_name := "postdelete-delete-cr-r" }} +{{ $sa_name := "postdelete-delete-cr-sa" }} +{{ $rb_name := "postdelete-delete-cr-rb" }} +{{ $np_name := "postdelete-delete-cr-np" }} +{{ $job_label := "postdelete-delete-cr-job" }} +{{ $ns := printf "mas-%s-core" .Values.instance_id }} + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/hook: PostDelete + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_label }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? 
+ # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: CR_NAMESPACE + value: {{ $ns }} + - name: CR_NAME + value: {{ $cr_name }} + + - name: CR_API_VERSION + value: {{ $cr_api_version }} + + - name: CR_KIND + value: {{ $cr_kind }} + + volumeMounts: [] + command: + - /bin/sh + - -c + - | + + set -e + + function delete_oc_resource(){ + RESOURCE=$1 + NAMESPACE=$2 + echo + echo "------------------------------------------------------------------" + echo "Check if resource $RESOURCE is present in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -z "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE not found, skipping" + return 0 + fi + + echo "oc delete resource $RESOURCE in namespace $NAMESPACE " + + # don't want a non-zero rc from oc delete to cause the job to fail (since we then want to try patching out the finalizers) + # so, temporarily set +e + set +e + oc delete $RESOURCE -n $NAMESPACE --timeout=300s --wait=true + return_code=$? + set -e + + echo "Verify that resource $RESOURCE is now absent in namespace $NAMESPACE " + # don't want a non-zero rc from oc delete to cause the job to fail + # so, temporarily set +e + set +e + RESOURCE_NAME=$(oc get $RESOURCE -n $NAMESPACE -o=jsonpath="{.metadata.name}") + set -e + if [[ -n "${RESOURCE_NAME}" ]]; then + echo "$RESOURCE still present, failing job" + exit 1 + fi + + echo "... 
verified" + return 0 + + } + + + delete_oc_resource "${CR_KIND}.${CR_API_VERSION}/${CR_NAME}" "${CR_NAMESPACE}" + + + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 +{{- end }} diff --git a/instance-applications/130-ibm-objectstorage-config/values.yaml b/instance-applications/130-ibm-objectstorage-config/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/130-ibm-objectstorage-config/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/130-ibm-watson-studio-config/Chart.yaml b/instance-applications/130-ibm-watson-studio-config/Chart.yaml new file mode 100644 index 000000000..ffe30dec8 --- /dev/null +++ b/instance-applications/130-ibm-watson-studio-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-watson-studio-config +description: WatsonStudio Config for MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/130-ibm-watson-studio-config/README.md b/instance-applications/130-ibm-watson-studio-config/README.md new file mode 100644 index 000000000..bd0aa5922 --- /dev/null +++ b/instance-applications/130-ibm-watson-studio-config/README.md @@ -0,0 +1,3 @@ +WatsonStudio Configuration for MAS Core Platform +=============================================================================== +Create a WatsonStudioCfg CR instance and associated credentials secret for use by MAS. 
\ No newline at end of file diff --git a/instance-applications/130-ibm-watson-studio-config/templates/01-watson-studio-credentials_Secret.yaml b/instance-applications/130-ibm-watson-studio-config/templates/01-watson-studio-credentials_Secret.yaml new file mode 100644 index 000000000..1a7211764 --- /dev/null +++ b/instance-applications/130-ibm-watson-studio-config/templates/01-watson-studio-credentials_Secret.yaml @@ -0,0 +1,16 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.instance_id }}-usersupplied-watsonstudio-creds-system" + namespace: mas-{{ .Values.instance_id }}-core + annotations: + argocd.argoproj.io/sync-wave: "150" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: Opaque +stringData: + username: "{{ .Values.suite_watson_studio_username }}" + password: "{{ .Values.suite_watson_studio_password }}" diff --git a/instance-applications/130-ibm-watson-studio-config/templates/02-ibm-mas_WatsonStudioCfg.yaml b/instance-applications/130-ibm-watson-studio-config/templates/02-ibm-mas_WatsonStudioCfg.yaml new file mode 100644 index 000000000..c29acb3fe --- /dev/null +++ b/instance-applications/130-ibm-watson-studio-config/templates/02-ibm-mas_WatsonStudioCfg.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: config.mas.ibm.com/v1 +kind: WatsonStudioCfg +metadata: + name: "{{ .Values.instance_id }}-watsonstudio-system" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/configScope: system + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "151" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: "MAS - Watson Studio configuration" +{{ .Values.watson_studio_config | toYaml | indent 2 }} diff --git a/instance-applications/130-ibm-watson-studio-config/values.yaml 
b/instance-applications/130-ibm-watson-studio-config/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/130-ibm-watson-studio-config/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/220-ibm-mas-workspace/Chart.yaml b/instance-applications/220-ibm-mas-workspace/Chart.yaml new file mode 100644 index 000000000..f8e0db372 --- /dev/null +++ b/instance-applications/220-ibm-mas-workspace/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-workspace +description: MAS Core Platform Suite Workspace +type: application +version: 1.0.0 diff --git a/instance-applications/220-ibm-mas-workspace/README.md b/instance-applications/220-ibm-mas-workspace/README.md new file mode 100644 index 000000000..245667eba --- /dev/null +++ b/instance-applications/220-ibm-mas-workspace/README.md @@ -0,0 +1,3 @@ +MAS Core Platform workspace +=============================================================================== +Installs the workspace needed for the `Suite`. 
diff --git a/instance-applications/220-ibm-mas-workspace/templates/01-ibm-mas_WorkspaceCfg.yaml b/instance-applications/220-ibm-mas-workspace/templates/01-ibm-mas_WorkspaceCfg.yaml new file mode 100644 index 000000000..eedc16ab3 --- /dev/null +++ b/instance-applications/220-ibm-mas-workspace/templates/01-ibm-mas_WorkspaceCfg.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: core.mas.ibm.com/v1 +kind: Workspace +metadata: + name: "{{ .Values.instance_id }}-{{ .Values.mas_workspace_id }}" + namespace: mas-{{ .Values.instance_id }}-core + labels: + mas.ibm.com/instanceId: "{{ .Values.instance_id }}" + mas.ibm.com/workspaceId: "{{ .Values.mas_workspace_id }}" +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "220" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + displayName: "{{ .Values.mas_workspace_name }}" diff --git a/instance-applications/220-ibm-mas-workspace/values.yaml b/instance-applications/220-ibm-mas-workspace/values.yaml new file mode 100644 index 000000000..3de2c1d75 --- /dev/null +++ b/instance-applications/220-ibm-mas-workspace/values.yaml @@ -0,0 +1,2 @@ +--- +mas_instance_id: xxx diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/Chart.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/Chart.yaml new file mode 100644 index 000000000..39a9a07af --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-suite-app-install +description: Install Application on MAS Core Platform Suite +type: application +version: 1.0.0 diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/README.md b/instance-applications/500-540-ibm-mas-suite-app-install/README.md new file mode 100644 index 000000000..7ff2d83a0 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/README.md @@ -0,0 +1,4 @@ +MAS Application Install 
+=============================================================================== +Generic chart for installing a MAS Application. +Certain templates are enabled only for specific MAS editions (`mas_edition`) and/or applications (`mas_app_id`). \ No newline at end of file diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/00-placeholder.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/00-placeholder.yaml new file mode 100644 index 000000000..553825888 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/00-placeholder.yaml @@ -0,0 +1,15 @@ +# ArgoCD will error if you attempt to render a chart that generates no manifests +# The only way I can think of to get around this is to put out a dummy resource +# while ibm_mas_suite_app_install is unset (and so none of the other templates in this chart are being rendered) +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: "{{ .Values.instance_id }}-{{ .Values.mas_app_id }}-install-placeholder" + namespace: {{ .Values.mas_app_namespace }} +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + this-is-just-a-placeholder-resource: "" diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/00-presync-add-mvi-scc_Job.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/00-presync-add-mvi-scc_Job.yaml new file mode 100644 index 000000000..042ef60c5 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/00-presync-add-mvi-scc_Job.yaml @@ -0,0 +1,131 @@ +{{- if .Values.run_sync_hooks }} + {{- if eq .Values.mas_app_id "visualinspection" }} + + +{{ $ns := .Values.mas_app_namespace }} +{{ $np_name := "presync-np-add-mvi-scc-np" }} +{{ $role_name := "presync-np-add-mvi-scc-r" }} +{{ $sa_name := "presync-np-add-mvi-scc-sa" }} +{{ $rb_name := printf "presync-np-add-mvi-scc-%s-rb" .Values.instance_id }} +{{ $job_name := 
"presync-np-add-mvi-scc-job" }} + + +--- +# Permit outbound communication by the Job pods +# (Needed to communicate with the K8S HTTP API and AWS SM) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $np_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "-100" + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + job-name: {{ $job_name }} + egress: + - {} + policyTypes: + - Egress + + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "-100" + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +# We need cluster-admin privileges here in order to manage SCCs +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + annotations: + argocd.argoproj.io/sync-wave: "-99" + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "-98" + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ 
.Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? + # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: MAS_APP_NAMESPACE + value: {{ .Values.mas_app_namespace}} + volumeMounts: [] + command: + - /bin/sh + - -c + - | + oc adm policy add-scc-to-user anyuid system:serviceaccount:${MAS_APP_NAMESPACE}:ibm-mas-visualinspection-operator + + restartPolicy: Never + + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 + +{{- end }} +{{- end }} diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/01-ibm-mas_namespace.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/01-ibm-mas_namespace.yaml new file mode 100644 index 000000000..677a5ce35 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/01-ibm-mas_namespace.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.mas_app_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "500" + labels: + argocd.argoproj.io/managed-by: {{ .Values.argo_namespace }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/02-ibm-entitlement_Secret.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/02-ibm-entitlement_Secret.yaml new file mode 100644 index 000000000..0ee996bb3 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/02-ibm-entitlement_Secret.yaml @@ -0,0 +1,15 @@ +--- +kind: Secret +apiVersion: v1 +metadata: + name: ibm-entitlement + namespace: {{ .Values.mas_app_namespace }} + annotations: + 
argocd.argoproj.io/sync-wave: "501" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ .Values.ibm_entitlement_key }} diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/02-ibm-mas_suite_cert_Secret.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/02-ibm-mas_suite_cert_Secret.yaml new file mode 100644 index 000000000..213ab0879 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/02-ibm-mas_suite_cert_Secret.yaml @@ -0,0 +1,27 @@ +{{- if eq .Values.mas_manual_cert_mgmt "true" }} +{{- if .Values.public_tls_secret_name }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.public_tls_secret_name }}" + namespace: {{ .Values.mas_app_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "501" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: +{{- if .Values.ca_cert }} + ca.crt: {{ .Values.ca_cert }} +{{- end }} +{{- if .Values.tls_cert }} + tls.crt: {{ .Values.tls_cert }} +{{- end }} +{{- if .Values.tls_key }} + tls.key: {{ .Values.tls_key }} +{{- end }} + +{{- end }} +{{- end }} diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/03-ibm-mas_OperatorGroup.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/03-ibm-mas_OperatorGroup.yaml new file mode 100644 index 000000000..11c5acdf0 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/03-ibm-mas_OperatorGroup.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: {{ .Values.mas_app_namespace }}-operator-group + namespace: {{ .Values.mas_app_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "501" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + 
labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + targetNamespaces: + - {{ .Values.mas_app_namespace }} diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/04-ibm-mas_Subscription.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/04-ibm-mas_Subscription.yaml new file mode 100644 index 000000000..8b09b5900 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/04-ibm-mas_Subscription.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-mas-{{ .Values.mas_app_id }} + namespace: {{ .Values.mas_app_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "502" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + channel: "{{ .Values.mas_app_channel }}" + installPlanApproval: Automatic +{{- if eq .Values.mas_app_id "health" }} + name: ibm-mas-manage +{{- else }} + name: ibm-mas-{{ .Values.mas_app_id }} +{{- end }} + source: {{ .Values.mas_app_catalog_source }} + sourceNamespace: openshift-marketplace diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/05-ibm-mas_Suite_App_Install.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/05-ibm-mas_Suite_App_Install.yaml new file mode 100644 index 000000000..d7c7a9f70 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/05-ibm-mas_Suite_App_Install.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: {{ .Values.mas_app_api_version }} +kind: {{ .Values.mas_app_kind }} +metadata: + annotations: + argocd.argoproj.io/sync-wave: "503" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true + name: "{{ .Values.instance_id }}" + namespace: {{ .Values.mas_app_namespace }} + labels: + mas.ibm.com/instanceId: {{ .Values.instance_id }} + mas.ibm.com/applicationId: 
{{ .Values.mas_app_id }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: +{{ .Values.mas_app_spec | toYaml | indent 2 }} + diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/templates/05-ibm-mas_custom_service_jvm_config_mvi.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/05-ibm-mas_custom_service_jvm_config_mvi.yaml new file mode 100644 index 000000000..8ad384415 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/05-ibm-mas_custom_service_jvm_config_mvi.yaml @@ -0,0 +1,39 @@ +{{- if eq .Values.mas_app_id "visualinspection" }} +{{- if eq .Values.mas_edition "essentials-inspection" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 'custom-service-jvm-config' + namespace: {{ .Values.mas_app_namespace }} + labels: + app: visualinspection + visualinspection_cr: {{ .Values.instance_id }} + app.kubernetes.io/instance: 'custom-service-jvm-config' + app.kubernetes.io/managed-by: ibm-mas-visualinspection + app.kubernetes.io/name: custom-service-jvm-configmap + mas.ibm.com/instanceId: {{ .Values.instance_id }} + mas.ibm.com/applicationId: visualinspection +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + annotations: + argocd.argoproj.io/sync-wave: "503" +data: + jvm-options: | + -Dvision.service.config=/etc/vision/config-powerai-k8s.json + -Xms512m + -Xmx7g + -XX:+UseStringDeduplication + -XX:+UseG1GC + -XX:+UseGCLogFileRotation + -XX:NumberOfGCLogFiles=10 + -XX:GCLogFileSize=1M + -XX:+PrintGCDateStamps + -verbose:gc + -XX:+PrintGCDetails + -Xloggc:/opt/powerai-vision/data/logs/vision-service/gc.log + -javaagent:/opt/ibm/wlp/usr/shared/resources/jmx_exporter/jmx_prometheus_javaagent-0.20.0.jar=9081:/opt/ibm/wlp/usr/shared/resources/jmx_exporter/jmx-config.yaml + +{{- end }} +{{- end }} diff --git 
a/instance-applications/500-540-ibm-mas-suite-app-install/templates/06-postsync-add-mvi-scc_Job.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/templates/06-postsync-add-mvi-scc_Job.yaml new file mode 100644 index 000000000..140056f59 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/templates/06-postsync-add-mvi-scc_Job.yaml @@ -0,0 +1,131 @@ +{{- if .Values.run_sync_hooks }} +{{- if eq .Values.mas_app_id "visualinspection" }} + + +{{ $ns := .Values.mas_app_namespace }} +{{ $np_name := "postsync-np-add-mvi-scc-np" }} +{{ $role_name := "postsync-np-add-mvi-scc-r" }} +{{ $sa_name := "postsync-np-add-mvi-scc-sa" }} +{{ $rb_name := printf "postsync-np-add-mvi-scc-%s-rb" .Values.instance_id }} +{{ $job_name := "postsync-np-add-mvi-scc-job" }} + + +--- +# Permit outbound communication by the Job pods +# (Needed to communicate with the K8S HTTP API and AWS SM) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $np_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + job-name: {{ $job_name }} + egress: + - {} + policyTypes: + - Egress + + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + +--- +# We need cluster-admin privileges here in order to manage SCCs +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + annotations: + argocd.argoproj.io/sync-wave: "101" + argocd.argoproj.io/hook: PostSync + 
argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "102" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: +{{- if .Values.custom_labels }} + metadata: + labels: +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + containers: + - name: run + # TODO: use a dedicated image with a smaller footprint for this sort of thing? + # Just using cli for now since it has all the deps we need to talk with AWS SM + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: MAS_APP_NAMESPACE + value: {{ .Values.mas_app_namespace}} + volumeMounts: [] + command: + - /bin/sh + - -c + - | + oc adm policy remove-scc-from-user anyuid system:serviceaccount:${MAS_APP_NAMESPACE}:ibm-mas-visualinspection-operator + + restartPolicy: Never + + serviceAccountName: {{ $sa_name }} + volumes: [] + backoffLimit: 4 + +{{- end }} +{{- end }} diff --git a/instance-applications/500-540-ibm-mas-suite-app-install/values.yaml b/instance-applications/500-540-ibm-mas-suite-app-install/values.yaml new file mode 100644 index 000000000..638a0a151 --- /dev/null +++ b/instance-applications/500-540-ibm-mas-suite-app-install/values.yaml @@ -0,0 +1,31 @@ +--- +run_sync_hooks: true +mas_manual_cert_mgmt: "False" + + +# #################################################### +# # Required values. 
No defaults provided. +# # Values shown here are for illustration purposes only +# #################################################### + +# instance_id: "instance1" +# mas_app_id: "manage" +# mas_app_namespace: "mas-instance1-manage" +# mas_app_api_version: "apps.mas.ibm.com/v1" +# mas_app_kind: "ManageApp" +# argo_namespace: "openshift-gitops" +# ibm_entitlement_key: "" +# mas_app_catalog_source: "ibm-operator-catalog" +# mas_edition "essentials-inspection" +# mas_app_spec: +# settings: +# ... + + +# #################################################### +# # Required iff mas_manual_cert_mgmt is "True" +# #################################################### +# public_tls_secret_name: "public-tls-cert" +# ca_cert: "" +# tls_cert: "" +# tls_key: "" \ No newline at end of file diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/Chart.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/Chart.yaml new file mode 100644 index 000000000..efae0d41f --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-suite-app-config +description: MAS Core Platform Suite Application Configuration +type: application +version: 1.0.0 diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/README.md b/instance-applications/510-550-ibm-mas-suite-app-config/README.md new file mode 100644 index 000000000..883cba15b --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/README.md @@ -0,0 +1,4 @@ +MAS Application Configuration +=============================================================================== +Generic chart for configuring a workspace for a MAS application (a.k.a "activating" the MAS application). +Certain templates are enabled only for specific MAS applications (`mas_app_id`). 
\ No newline at end of file diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/templates/00-placeholder.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/templates/00-placeholder.yaml new file mode 100644 index 000000000..06bacace6 --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/templates/00-placeholder.yaml @@ -0,0 +1,14 @@ +# ArgoCD will error if you attempt to render a chart that generates no manifests +# The only way I can think of to get around this is to put out a dummy resource +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: "{{ .Values.instance_id }}-{{ .Values.mas_app_id }}-{{ .Values.mas_workspace_id }}-config-placeholder" + namespace: {{ .Values.mas_app_namespace }} +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + this-is-just-a-placeholder-resource: "" diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-manage_additional_server_config_secrets.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-manage_additional_server_config_secrets.yaml new file mode 100644 index 000000000..e90fda98f --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-manage_additional_server_config_secrets.yaml @@ -0,0 +1,20 @@ +{{- if (or (eq $.Values.mas_app_id "manage") (eq $.Values.mas_app_id "health")) }} +{{- range $key, $value := $.Values.mas_app_server_bundles_combined_add_server_config }} +--- +kind: Secret +apiVersion: v1 +metadata: + annotations: + argocd.argoproj.io/sync-wave: "600" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true + name: {{ $key }} + namespace: {{ $.Values.mas_app_namespace }} +{{- if $.Values.custom_labels }} + labels: +{{ $.Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: + server-custom.xml: >- + {{ $value }} +{{- end }} +{{- end }} diff --git 
a/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-manage_customization_archive_secrets.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-manage_customization_archive_secrets.yaml new file mode 100644 index 000000000..61eab6308 --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-manage_customization_archive_secrets.yaml @@ -0,0 +1,23 @@ +{{- if (or (eq $.Values.mas_app_id "manage") (eq $.Values.mas_app_id "health")) }} +{{- range $key, $value := $.Values.customization_archive_secret_names }} +--- +kind: Secret +apiVersion: v1 +metadata: + annotations: + argocd.argoproj.io/sync-wave: "600" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +{{- if $.Values.custom_labels }} + labels: +{{ $.Values.custom_labels | toYaml | indent 4 }} +{{- end }} + name: {{ $value.secret_name }} + namespace: {{ $.Values.mas_app_namespace }} +stringData: + credentials: | + user={{ $value.username }} + password={{ $value.password }} +type: Opaque +{{- end }} +{{- end }} + diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-mas_suite_cert_Secret.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-mas_suite_cert_Secret.yaml new file mode 100644 index 000000000..14ffbce31 --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/templates/01-ibm-mas_suite_cert_Secret.yaml @@ -0,0 +1,27 @@ +{{- if eq .Values.mas_manual_cert_mgmt "true" }} +{{- if .Values.public_tls_secret_name }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: "{{ .Values.public_tls_secret_name }}" + namespace: {{ .Values.mas_app_namespace }} + annotations: + argocd.argoproj.io/sync-wave: "601" +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +data: +{{- if .Values.ca_cert }} + ca.crt: {{ .Values.ca_cert }} +{{- end }} +{{- if .Values.tls_cert }} + tls.crt: {{ .Values.tls_cert }} 
+{{- end }} +{{- if .Values.tls_key }} + tls.key: {{ .Values.tls_key }} +{{- end }} + +{{- end }} +{{- end }} diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/templates/02-ibm-mas_Suite_App_workspace.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/templates/02-ibm-mas_Suite_App_workspace.yaml new file mode 100644 index 000000000..a71acf09c --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/templates/02-ibm-mas_Suite_App_workspace.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: {{ .Values.mas_app_ws_apiversion }} +kind: {{ .Values.mas_app_ws_kind }} +metadata: + annotations: + argocd.argoproj.io/sync-wave: "601" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true + name: "{{ .Values.instance_id }}-{{ .Values.mas_workspace_id }}" + namespace: {{ .Values.mas_app_namespace }} + labels: + mas.ibm.com/instanceId: {{ .Values.instance_id }} + mas.ibm.com/workspaceId: {{ .Values.mas_workspace_id }} + mas.ibm.com/applicationId: {{ .Values.mas_app_id }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: +{{ .Values.mas_appws_spec | toYaml | indent 2 }} diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/templates/04-postsync-manage-verify.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/templates/04-postsync-manage-verify.yaml new file mode 100644 index 000000000..b42832ac6 --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/templates/04-postsync-manage-verify.yaml @@ -0,0 +1,273 @@ +{{- if eq .Values.mas_app_id "manage" }} + + +{{ $ns := .Values.mas_app_namespace }} +{{ $np_name := "postsync-verify-manage-np" }} +{{ $role_name := "postsync-verify-manage-role" }} +{{ $sa_name := "postsync-verify-manage-sa" }} +{{ $rb_name := "postsync-verify-manage-rb" }} +{{ $tests_cm_name := "postsync-verify-tests-manage-cm" }} +{{ $job_name := "postsync-verify-manage-job" }} + + + +--- +# Permit outbound communication by the 
Job pod +# (Needed to communicate with the K8S HTTP API, PyPI, manage Route) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $np_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + app: {{ $job_name }} + egress: + - {} + policyTypes: + - Egress + + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + + + +--- +# ------------------------------------- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - list + apiGroups: + - "apps.mas.ibm.com" + resources: + - manageworkspace + - manageapp + - manageworkspaces + - manageapps +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "101" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $role_name }} 
+# ------------------------------------- + + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ $tests_cm_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "102" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +immutable: false +data: + requirements.txt: |- + pytest + kubernetes + openshift + tests.py: |- + from kubernetes import client,config + from kubernetes.client import Configuration + from openshift.dynamic import DynamicClient + import pytest + import os + + mas_instance_id = os.getenv("MAS_INSTANCE_ID") + if mas_instance_id is None: + raise Exception(f"Required MAS_INSTANCE_ID environment variable is not set") + + # e.g. "masdev" + mas_workspace_id = os.getenv("MAS_WORKSPACE_ID") + if mas_workspace_id is None: + raise Exception(f"Required MAS_WORKSPACE_ID environment variable is not set") + + manage_namespace = f"mas-{mas_instance_id}-manage" + + @pytest.fixture(scope="session") + def dyn_client(): + if "KUBERNETES_SERVICE_HOST" in os.environ: + config.load_incluster_config() + k8s_config = Configuration.get_default_copy() + k8s_client = client.api_client.ApiClient(configuration=k8s_config) + else: + k8s_client = config.new_client_from_config() + dyn_client = DynamicClient(k8s_client) + yield dyn_client + + @pytest.fixture(scope="session") + def v1_manageworkspace(dyn_client): + yield dyn_client.resources.get(api_version='apps.mas.ibm.com/v1', kind='ManageWorkspace') + + @pytest.fixture(scope="session") + def v1_manageapp(dyn_client): + yield dyn_client.resources.get(api_version='apps.mas.ibm.com/v1', kind='ManageApp') + + @pytest.fixture(scope="session") + def mange_workspace_reconciled_version(mange_workspace_cr): + try: + yield mange_workspace_cr['status']['versions']['reconciled'] + except KeyError as e: + assert False, f"Unable to determine ManageWorkspace reconciled 
version. Error details: {e}" + + @pytest.fixture(scope="session") + def manage_version(mange_app_cr): + try: + yield mange_app_cr['status']['components']['manage']['version'] + except KeyError as e: + assert False, f"Unable to determine ManageApp component version. Error details: {e}" + + @pytest.fixture(scope="session") + def mange_workspace_cr(v1_manageworkspace): + yield v1_manageworkspace.get(namespace=manage_namespace, label_selector=f"mas.ibm.com/instanceId={mas_instance_id}, mas.ibm.com/workspaceId={mas_workspace_id}").items[0] + + @pytest.fixture(scope="session") + def mange_app_cr(v1_manageapp): + yield v1_manageapp.get(namespace=manage_namespace, label_selector=f"mas.ibm.com/instanceId={mas_instance_id}").items[0] + + def test_expected_reconciled_version(mange_workspace_reconciled_version, manage_version): + assert manage_version == mange_workspace_reconciled_version, f"Expected ManageWorkspace Reconciled version: {mange_workspace_reconciled_version} to match Manage Operator version: {manage_version}" + + def test_languages_set(mange_workspace_cr): + status_langs = mange_workspace_cr['status']['settings']['languages'] + spec_langs = mange_workspace_cr['spec']['settings']['languages'] + if spec_langs is not None: + assert status_langs == spec_langs, f"Expected languages set in the spec: {spec_langs} to be equal to the languages in the status: {status_langs}" + + def test_addons_enabled(mange_workspace_cr): + for component in mange_workspace_cr['spec']['components']: + print(f"check component {component[0]} in status") + assert mange_workspace_cr['status']['components'][component[0]]["enabled"], f"Expected component {component} to be enabled in status" + + def test_bundle_sizes(mange_workspace_cr): + spec_serverbundles = mange_workspace_cr['spec']['settings']['deployment']['serverBundles'] + status_serverbundles = mange_workspace_cr['status']['settings']['deployment']['serverBundles'] + if spec_serverbundles is not None: + assert status_serverbundles == 
spec_serverbundles, f"Expected serverbundles set in the spec: {spec_serverbundles} to be equal to the languages in the status: {status_serverbundles}" + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "103" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_name }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + imagePullSecrets: [] + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: MAS_INSTANCE_ID + value: "{{ .Values.instance_id }}" + - name: MAS_WORKSPACE_ID + value: "{{ .Values.mas_workspace_id }}" + volumeMounts: + - name: tests + mountPath: /tmp/tests + command: + - /bin/sh + - -c + - | + pip install -r /tmp/tests/requirements.txt + pytest -o cache_dir=/tmp/__pycache__ /tmp/tests/tests.py + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: + - name: tests + configMap: + name: {{ $tests_cm_name }} + items: + - key: requirements.txt + path: requirements.txt + - key: tests.py + path: tests.py + defaultMode: 420 + optional: false + backoffLimit: 4 + + +{{- end }} diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/templates/04-postsync-mvi-verify.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/templates/04-postsync-mvi-verify.yaml new file mode 100644 index 000000000..eea9a212a --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/templates/04-postsync-mvi-verify.yaml @@ -0,0 +1,429 @@ +{{- if eq .Values.mas_app_id "visualinspection" }} + + +{{ $ns := .Values.mas_app_namespace }} +{{ 
$np_name := "postsync-verify-mvi-np" }} +{{ $role_name := "postsync-verify-mvi-role" }} +{{ $crole_name := "postsync-verify-mvi-crole" }} +{{ $sa_name := "postsync-verify-mvi-sa" }} +{{ $rb_name := "postsync-verify-mvi-rb" }} +{{ $crb_name := "postsync-verify-mvi-crb" }} +{{ $tests_cm_name := "postsync-verify-tests-mvi-cm" }} +{{ $job_name := "postsync-verify-mvi-job" }} + + + +--- +# Permit outbound communication by the Job pod +# (Needed to communicate with the K8S HTTP API, PyPI, MVI Route) +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $np_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + podSelector: + matchLabels: + app: {{ $job_name }} + egress: + - {} + policyTypes: + - Egress + + +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ $sa_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} + + + +--- +# ------------------------------------- +# MVI Verification tests needs to get the MVI api key secret and the MVI route resources +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $role_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - list + apiGroups: + - "" + resources: + - secrets + - verbs: + - get + apiGroups: + - "route.openshift.io" + resources: + - routes + +--- +kind: 
RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $rb_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "101" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $role_name }} +# ------------------------------------- + + + + +--- +# ------------------------------------- +# MVI Verification tests need to be able to get/list nodes +# to check for ready GPU nodes +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $crole_name }} + annotations: + argocd.argoproj.io/sync-wave: "100" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +rules: + - verbs: + - get + - list + apiGroups: + - "" + resources: + - nodes + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ $crb_name }} + annotations: + argocd.argoproj.io/sync-wave: "101" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ $sa_name }} + namespace: {{ $ns }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $crole_name }} +# ------------------------------------- + + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ $tests_cm_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "102" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + 
labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +immutable: false +data: + requirements.txt: |- + pytest + requests + kubernetes + openshift + semver + tests.py: |- + from kubernetes import client,config + from kubernetes.client import Configuration + from openshift.dynamic import DynamicClient + import base64 + import requests + import pytest + import tempfile + import os + import semver + + + # e.g. "dclain-1a" + mas_instance_id = os.getenv("MAS_INSTANCE_ID") + if mas_instance_id is None: + raise Exception(f"Required MAS_INSTANCE_ID environment variable is not set") + + # e.g. "masdev" + mas_workspace_id = os.getenv("MAS_WORKSPACE_ID") + if mas_workspace_id is None: + raise Exception(f"Required MAS_WORKSPACE_ID environment variable is not set") + + mvi_namespace = f"mas-{mas_instance_id}-visualinspection" + mvi_route_name = f"{mas_instance_id}-{mas_workspace_id}" + + + @pytest.fixture(scope="session") + def dyn_client(): + if "KUBERNETES_SERVICE_HOST" in os.environ: + config.load_incluster_config() + k8s_config = Configuration.get_default_copy() + k8s_client = client.api_client.ApiClient(configuration=k8s_config) + else: + k8s_client = config.new_client_from_config() + dyn_client = DynamicClient(k8s_client) + yield dyn_client + + @pytest.fixture(scope="session") + def v1_nodes(dyn_client): + yield dyn_client.resources.get(api_version='v1', kind='Node') + + @pytest.fixture(scope="session") + def gpu_nodes(v1_nodes): + yield v1_nodes.get(label_selector='nvidia.com/gpu.count>0') + + @pytest.fixture(scope="session") + def v1_secrets(dyn_client): + yield dyn_client.resources.get(api_version='v1', kind='Secret') + + @pytest.fixture(scope="session") + def v1_routes(dyn_client): + yield dyn_client.resources.get(api_version='route.openshift.io/v1', kind='Route') + + + @pytest.fixture(scope="session") + def mvi_route(v1_routes): + yield v1_routes.get(name=mvi_route_name, namespace=mvi_namespace) + + @pytest.fixture(scope="session") + def 
mvi_host(mvi_route): + try: + yield mvi_route['spec']['host'] + except KeyError as e: + assert False, f"Unable to determine MVI host; spec.host key not present in {mvi_route_name}/{mvi_namespace}: {mvi_route}. Error details: {e}" + + @pytest.fixture(scope="session") + def mvi_host_ca_filepath(mvi_route): + try: + mvi_route_ca = mvi_route['spec']['tls']['caCertificate'] + except KeyError as e: + assert False, f"Unable to determine MVI ca cetrt; spec.tls.caCertificate key not present in {mvi_route_name}/{mvi_namespace}: {mvi_route}. Error details: {e}" + + with tempfile.NamedTemporaryFile() as fp: + fp.write(mvi_route_ca.encode()) + fp.flush() + yield fp.name + + + @pytest.fixture(scope="session") + def mvi_version(mvi_host, mvi_host_ca_filepath): + resp = requests.get( + f"https://{mvi_host}/api/version-info", + verify=mvi_host_ca_filepath + ) + resp_json = resp.json() + assert resp.status_code == 200, f"Expected status 200 from GET /api/version-info, but got {resp.status_code}. Body: {resp_json}." + try: + raw_version_str = resp_json['version'] + stripped_version = raw_version_str.split("-")[0] + return semver.VersionInfo.parse(stripped_version) + except KeyError as e: + assert False, f"Expected version key not found in response from GET /api/version-info. 
Body: {resp_json}" + + + + @pytest.fixture(scope="session") + def mvi_apikey(v1_secrets, mvi_version): + ''' + Obtaining this API key is only currently possible in MVI versions >= 9.0.0, so any test + that attempts to use this fixture against MVI < 9.0.0 will be skipped at present + ''' + if mvi_version.compare("9.0.0") < 0: + pytest.skip("MVI API Key secret required by this test is only currently available in MVI >= 9.0.0") + + mvi_apikey_secret = v1_secrets.get(label_selector="app.kubernetes.io/name=visualinspection-dnn-secret", namespace=mvi_namespace) + if not mvi_apikey_secret: + assert False, f"Could not find any visual inspection dnn secret in {mvi_namespace}" + + mvi_apikey_secret_name = mvi_apikey_secret.items[0]['metadata']['name'] + try: + dnn_apikey_b64 = mvi_apikey_secret.items[0]['data']['dnn-apikey'] + except: + assert False, "Expected data.dnn-apikey field not present in secret {mvi_apikey_secret_name} in {mvi_namespace}" + yield base64.b64decode(dnn_apikey_b64) + + + def test_at_least_one_ready_gpu_node(gpu_nodes): + # We may have installed MVI on a cluster with no GPU nodes (e.g. an internal test system) + # In that case, skip this test so we don't block the pipeline unnecessarily + if len(gpu_nodes.items) == 0: + pytest.skip("Cluster does not have an GPU nodes, skipping test") + + # If there are GPU nodes, check at least one of them is ready + for gpu_node in gpu_nodes.items: + for cond in gpu_node.status.conditions: + if cond.type == 'Ready' and cond.status == 'True': + return + assert False, f"No GPU nodes with condition Ready=True found in the cluster." + + + def test_at_least_one_gpu_visible_to_mvi(gpu_nodes, mvi_host, mvi_apikey, mvi_host_ca_filepath): + + # We may have installed MVI on a cluster with no GPU nodes (e.g. 
an internal test system) + # In that case, skip this test so we don't block the pipeline unnecessarily + if len(gpu_nodes.items) == 0: + pytest.skip("Cluster does not have an GPU nodes, skipping test") + + # If there are GPU nodes, check at least one of them is visible to MVI + resp = requests.get( + f"https://{mvi_host}/api/system/device-info", + headers={ + 'X-Auth-Token': mvi_apikey + }, + verify=mvi_host_ca_filepath + ) + assert resp.status_code == 200 + resp_json = resp.json() + try: + total_gpu_count = resp_json['gpu_info']['total'] + assert total_gpu_count > 0, f"No GPU Nodes available for use by MVI according to GET /api/system/device-info. Body: {resp_json}" + except KeyError as e: + assert False, f"Expected gpu_info.total key not found in response from GET /api/system/device-info: {resp_json}. Error details: {e}" + + def test_ping_api(mvi_host, mvi_host_ca_filepath): + resp = requests.get( + f"https://{mvi_host}/api/ping", + verify=mvi_host_ca_filepath + ) + resp_json = resp.json() + assert resp.status_code == 200, f"Expected status 200 from GET /api/ping, but got {resp.status_code}. Body: {resp_json}." + + try: + assert resp_json['healthy'] == True, f"Expected healthy field from GET /api/ping to be true, but was {resp_json['healthy']}. Body: {resp_json}" + except KeyError as e: + assert False, f"Expected healthy key not found in response from GET /api/ping. Body: {resp_json}" + + try: + assert resp_json['status'] == "Ready", f"Expected status field from GET /api/ping to be 'Ready', but was {resp_json['status']}. Body: {resp_json}" + except KeyError as e: + assert False, f"Expected status key not found in response from GET /api/ping. 
Body: {resp_json}" + + + + + + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $job_name }} + namespace: {{ $ns }} + annotations: + argocd.argoproj.io/sync-wave: "103" + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +{{- if .Values.custom_labels }} + labels: +{{ .Values.custom_labels | toYaml | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ $job_name }} +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + spec: + imagePullSecrets: [] + containers: + - name: run + image: quay.io/ibmmas/cli:9.4.0-pre.gitops + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + env: + - name: MAS_INSTANCE_ID + value: "{{ .Values.instance_id }}" + - name: MAS_WORKSPACE_ID + value: "{{ .Values.mas_workspace_id }}" + volumeMounts: + - name: tests + mountPath: /tmp/tests + command: + - /bin/sh + - -c + - | + pip install -r /tmp/tests/requirements.txt + pytest -o cache_dir=/tmp/__pycache__ /tmp/tests/tests.py + restartPolicy: Never + serviceAccountName: {{ $sa_name }} + volumes: + - name: tests + configMap: + name: {{ $tests_cm_name }} + items: + - key: requirements.txt + path: requirements.txt + - key: tests.py + path: tests.py + defaultMode: 420 + optional: false + backoffLimit: 4 + + +{{- end }} diff --git a/instance-applications/510-550-ibm-mas-suite-app-config/values.yaml b/instance-applications/510-550-ibm-mas-suite-app-config/values.yaml new file mode 100644 index 000000000..99842de16 --- /dev/null +++ b/instance-applications/510-550-ibm-mas-suite-app-config/values.yaml @@ -0,0 +1,45 @@ +--- +mas_manual_cert_mgmt: "False" + +# #################################################### +# # Required values. No defaults provided. 
+# # Values shown here are for illustration purposes only +# #################################################### + +# instance_id: "instance1" +# mas_app_id: "manage" +# mas_workspace_id: "masdev" +# mas_app_namespace: "mas-instance1-manage" +# mas_app_ws_apiversion: "apps.mas.ibm.com/v1" +# mas_app_ws_kind: "ManageWorkspace" +# mas_appws_spec: +# bindings: +# jdbc: workspace-application +# components: +# base: +# version: latest +# settings: +# ... + + +# #################################################### +# # Required iff mas_manual_cert_mgmt is "True" +# #################################################### + +# public_tls_secret_name: "public-tls-cert" +# ca_cert: "" +# tls_cert: "" +# tls_key: "" + + +# #################################################### +# # Required iff mas_app_id is "manage" or "health" +# #################################################### + +# mas_app_server_bundles_combined_add_server_config: +# masdev-manage-d--sb0--asc--sn: "" +# ... +# customization_archive_secret_names: +# - secret_name: "" +# username: "" +# password: "" \ No newline at end of file diff --git a/main.py b/main.py new file mode 100644 index 000000000..24f63c52c --- /dev/null +++ b/main.py @@ -0,0 +1,81 @@ +def define_env(env): + + gitops_repo_url = "https://github.com/ibm-mas/gitops" + gitops_repo_branch = "main" + + env.variables["gitops_repo_url"] = gitops_repo_url + env.variables["gitops_repo_branch"] = gitops_repo_branch + + + def config_repo(): + return "**Config Repository**" + env.macro(config_repo) + + def source_repo(): + return "**Source Repository**" + env.macro(source_repo) + + def secrets_vault(): + return "**Secrets Vault**" + env.macro(secrets_vault) + + def management_cluster(): + return "**Management Cluster**" + env.macro(management_cluster) + + + def target_cluster(): + return "**Target Cluster**" + env.macro(target_cluster) + + def target_clusters(): + return "**Target Clusters**" + env.macro(target_clusters) + + + def 
gitops_repo_file_url(path): + return f"{gitops_repo_url}/blob/{gitops_repo_branch}/{path}" + env.macro(gitops_repo_file_url) + + def gitops_repo_dir_url(path): + return f"{gitops_repo_url}/tree/{gitops_repo_branch}/{path}" + env.macro(gitops_repo_dir_url) + + + def gitops_repo_file_link(path, name=None): + if name is None: name = path + return f"[{name}]({gitops_repo_file_url(path)})" + env.macro(gitops_repo_file_link) + + def gitops_repo_dir_link(path, name=None): + if name is None: name = path + return f"[{name}]({gitops_repo_dir_url(path)})" + env.macro(gitops_repo_dir_link) + + + + + + def account_root_chart(): + return gitops_repo_dir_link("root-applications/ibm-mas-account-root", "Account Root Chart") + env.macro(account_root_chart) + + + def cluster_root_chart(): + return gitops_repo_dir_link("root-applications/ibm-mas-cluster-root", "Cluster Root Chart") + env.macro(cluster_root_chart) + + def instance_root_chart(): + return gitops_repo_dir_link("root-applications/ibm-mas-instance-root", "Instance Root Chart") + env.macro(instance_root_chart) + + + + def cluster_root_app_set(): + return gitops_repo_file_link("root-applications/ibm-mas-account-root/templates/000-cluster-appset.yaml", "Cluster Root Application Set") + env.macro(cluster_root_app_set) + + def instance_root_app_set(): + return gitops_repo_file_link("root-applications/ibm-mas-cluster-root/templates/099-instance-appset.yaml", "Instance Root Application Set") + env.macro(instance_root_app_set) + diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..ead07c29d --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,50 @@ +# mkdocs serve + +site_name: MAS GitOps +site_url: https://pages.github.com/ibm-mas/gitops + +repo_name: GitHub +repo_url: https://github.com/ibm-mas/gitops +edit_uri: blob/demo2/docs/ + +nav: + - 'Home': index.md + - 'Architecture': + - 'Overview': 'architecture.md' + - 'The Source Repository': helmcharts.md + - 'The Config Repository': configrepo.md + - 'The Secrets Vault': 
secrets.md + + - 'Details': + - 'Mapping Config to MAS Deployments': configtoinstances.md + - 'Deployment Orchestration': orchestration.md + + - 'Reference': + - 'Account Root Application Manifest': accountrootmanifest.md + - 'Known Limitations': limitations.md + +theme: + name: readthedocs + highlightjs: true + hljs_languages: + - yaml + prev_next_buttons_location: none + features: + - content.code.annotate +extra_css: + - extra.css + - https://cdn.jsdelivr.net/gh/fancyapps/fancybox@3.5.7/dist/jquery.fancybox.min.css +extra_javascript: + - https://cdn.jsdelivr.net/npm/jquery@3.5.1/dist/jquery.min.js + - https://cdn.jsdelivr.net/gh/fancyapps/fancybox@3.5.7/dist/jquery.fancybox.min.js + +plugins: + - macros + - drawio_file + +markdown_extensions: + - toc: + permalink:  + - admonition + - attr_list + - markdown.extensions.footnotes \ No newline at end of file diff --git a/root-applications/ibm-mas-account-root/Chart.yaml b/root-applications/ibm-mas-account-root/Chart.yaml new file mode 100644 index 000000000..8fecf19cf --- /dev/null +++ b/root-applications/ibm-mas-account-root/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-account-root +description: IBM MAS account root +type: application +version: 1.0.0 diff --git a/root-applications/ibm-mas-account-root/README.md b/root-applications/ibm-mas-account-root/README.md new file mode 100644 index 000000000..24eee37cd --- /dev/null +++ b/root-applications/ibm-mas-account-root/README.md @@ -0,0 +1,3 @@ +IBM MAS Account Root Application +=============================================================================== +Installs the Cluster Root ArgoCD ApplicationSet (`000-cluster-appset.yaml`) responsible for generating a set of IBM MAS Cluster Root ArgoCD Applications. 
\ No newline at end of file diff --git a/root-applications/ibm-mas-account-root/templates/000-cluster-appset.yaml b/root-applications/ibm-mas-account-root/templates/000-cluster-appset.yaml new file mode 100644 index 000000000..cdb26fd2d --- /dev/null +++ b/root-applications/ibm-mas-account-root/templates/000-cluster-appset.yaml @@ -0,0 +1,111 @@ + +--- +# IBM Maximo Application Suite Account Root Application Set +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: cluster-appset.{{ .Values.account.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + annotations: + argocd.argoproj.io/sync-wave: "000" +spec: + goTemplate: true + generators: + - merge: + mergeKeys: + # Would love to use ".path.path" here to merge the configs together + # but this is not possible currently with goTemplate: true - see https://github.com/argoproj/argo-cd/issues/12836 + # (and we really want goTemplate: true so we can dump the entire config file into HELM_VALUES using toPrettyJson) + # instead, we are forced to explicitly provide a merge key in all of our config files to make them line up + - 'merge-key' + generators: + # This is the "base" generator, it *must* be first in the list + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/*/ibm-mas-cluster-base.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/*/ibm-operator-catalog.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/*/redhat-cert-manager.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/*/ibm-dro.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + 
revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/*/ibm-db2u.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/*/cis-compliance.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/*/nvidia-gpu-operator.yaml" + template: + metadata: + name: "cluster.{{ `{{.cluster.id}}` }}" + labels: + environment: '{{ .Values.account.id }}' + region: '{{ `{{ .region.id }}` }}' + cluster: '{{ `{{ .cluster.id }}` }}' +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + annotations: + healthCheckTimeout: "1800" + spec: + project: "{{ .Values.argo.projects.rootapps }}" + source: + repoURL: "{{ .Values.source.repo_url }}" + targetRevision: "{{ .Values.source.revision }}" + path: root-applications/ibm-mas-cluster-root + helm: + releaseName: clusterappset + values: "{{ `{{ toYaml . 
}}` }}" + parameters: + - name: "generator.repo_url" + value: "{{ .Values.generator.repo_url }}" + - name: "generator.revision" + value: "{{ .Values.generator.revision }}" + - name: "source.revision" + value: "{{ .Values.source.revision }}" + - name: "source.repo_url" + value: "{{ .Values.source.repo_url }}" + - name: argo.namespace + value: "{{ .Values.argo.namespace }}" + - name: argo.projects.rootapps + value: "{{ .Values.argo.projects.rootapps }}" + - name: argo.projects.apps + value: "{{ .Values.argo.projects.apps }}" + - name: avp.name + value: "{{ .Values.avp.name }}" + - name: avp.secret + value: "{{ .Values.avp.secret }}" + - name: avp.values_varname + value: "{{ .Values.avp.values_varname }}" + destination: + server: 'https://kubernetes.default.svc' + namespace: {{ .Values.argo.namespace }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true diff --git a/root-applications/ibm-mas-account-root/values.yaml b/root-applications/ibm-mas-account-root/values.yaml new file mode 100644 index 000000000..c603ebd3b --- /dev/null +++ b/root-applications/ibm-mas-account-root/values.yaml @@ -0,0 +1,25 @@ +--- + +avp: + name: "argocd-vault-plugin-helm" + secret: "" + values_varname: "HELM_VALUES" + +source: + repo_url: "https://github.com/ibm-mas/gitops" + revision: "poc" + +# Customers will definitely need to customise this (our gitops-envs/mas-dev repos are private), +# So deliberately not specifying defaults here; we want charts to refuse to render if these are not specified +# Both of these correspond to requirement arguments of the gitops-bootstrap CLI function (--github-url and --github-revision) +# generator: +# repo_url: "" +# revision: "" + +# These defaults align with the ArgoCD worker setup by gitops-bootstrap +# (openshift-gitops with a single ArgoCD project "mas") +argo: + namespace: "openshift-gitops" + projects: + rootapps: "mas" + apps: "mas" diff --git 
a/root-applications/ibm-mas-cluster-root/Chart.yaml b/root-applications/ibm-mas-cluster-root/Chart.yaml new file mode 100644 index 000000000..7aa069c38 --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-cluster-root +description: IBM MAS cluster root +type: application +version: 1.0.0 diff --git a/root-applications/ibm-mas-cluster-root/README.md b/root-applications/ibm-mas-cluster-root/README.md new file mode 100644 index 000000000..69501b008 --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/README.md @@ -0,0 +1,5 @@ +IBM MAS Cluster Root Application +=============================================================================== +Installs various ArgoCD Applications for managing dependencies shared by MAS instances on the target cluster. + +Also installs the MAS Instance Root ArgoCD ApplicationSet (`099-instance-appset.yaml`) responsible for generating a set of IBM MAS Instance Root ArgoCD Applications for managing MAS instances on the target cluster. 
diff --git a/root-applications/ibm-mas-cluster-root/templates/000-ibm-operator-catalog-app.yaml b/root-applications/ibm-mas-cluster-root/templates/000-ibm-operator-catalog-app.yaml new file mode 100644 index 000000000..cf3b5e29b --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/000-ibm-operator-catalog-app.yaml @@ -0,0 +1,60 @@ +{{- if not (empty .Values.ibm_operator_catalog) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: operator-catalog.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "000" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + ignoreDifferences: + - group: '*' + kind: ServiceAccount + jsonPointers: + - /imagePullSecrets + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/000-ibm-operator-catalog + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + ibm_entitlement_key: "{{ .Values.ibm_operator_catalog.ibm_entitlement_key }}" + mas_catalog_version: "{{ .Values.ibm_operator_catalog.mas_catalog_version }}" + mas_catalog_image: "{{ .Values.ibm_operator_catalog.mas_catalog_image }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end 
}} + - name: ARGOCD_APP_NAME + value: operatorcatalogapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-cluster-root/templates/010-ibm-redhat-cert-manager-app.yaml b/root-applications/ibm-mas-cluster-root/templates/010-ibm-redhat-cert-manager-app.yaml new file mode 100644 index 000000000..27b48adef --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/010-ibm-redhat-cert-manager-app.yaml @@ -0,0 +1,60 @@ +{{- if not (empty .Values.redhat_cert_manager) }} +--- +# Redhat Cert Manager +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: redhat-cert-manager.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "010" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/010-redhat-cert-manager + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + account_id: "{{ .Values.account.id }}" + region_id: "{{ .Values.region.id }}" + cluster_id: "{{ 
.Values.cluster.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + sm_aws_access_key_id: "{{ .Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ .Values.sm.aws_secret_access_key }}" + channel: "{{ .Values.redhat_cert_manager.channel }}" + run_sync_hooks: {{ .Values.redhat_cert_manager.run_sync_hooks }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: rcmapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-cluster-root/templates/020-ibm-dro-app.yaml b/root-applications/ibm-mas-cluster-root/templates/020-ibm-dro-app.yaml new file mode 100644 index 000000000..72e9e04e2 --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/020-ibm-dro-app.yaml @@ -0,0 +1,73 @@ +{{- if not (empty .Values.ibm_dro) }} +--- +# IBM Data Reporter Operator (DRO) +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: dro.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "020" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + ignoreDifferences: + - group: 'marketplace.redhat.com/v1alpha1' + kind: MarketplaceConfig + jsonPointers: + - /spec +
project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/020-ibm-dro + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + account_id: "{{ .Values.account.id }}" + region_id: "{{ .Values.region.id }}" + cluster_id: "{{ .Values.cluster.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + sm_aws_access_key_id: "{{ .Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ .Values.sm.aws_secret_access_key }}" + run_sync_hooks: {{.Values.ibm_dro.run_sync_hooks }} + dro_namespace: "{{ .Values.ibm_dro.dro_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_dro.ibm_entitlement_key }}" + dro_cmm_setup: "{{ .Values.ibm_dro.dro_cmm_setup }}" + {{- if .Values.ibm_dro.dro_cmm_setup }} + dro_cmm: + auth_apikey: "{{ .Values.ibm_dro.dro_cmm.auth_apikey }}" + auth_url: "{{ .Values.ibm_dro.dro_cmm.auth_url }}" + cmm_url: "{{ .Values.ibm_dro.dro_cmm.cmm_url }}" + {{- end }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: droapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-cluster-root/templates/021-ibm-dro-cleanup.yaml b/root-applications/ibm-mas-cluster-root/templates/021-ibm-dro-cleanup.yaml new file mode 100644 index 000000000..39a8aa5dc --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/021-ibm-dro-cleanup.yaml @@ -0,0 +1,56 @@ +{{- if not (empty .Values.ibm_dro) }} +--- +# IBM DRO Cleanup +apiVersion: argoproj.io/v1alpha1 +kind:
Application +metadata: + name: ibm-dro-cleanup.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "021" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io/cleanup +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/021-ibm-dro-cleanup + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + dro_namespace: "{{ .Values.ibm_dro.dro_namespace }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: ibmdrocleanup + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true + - Validate=false +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-cluster-root/templates/040-cis-compliance-app.yaml b/root-applications/ibm-mas-cluster-root/templates/040-cis-compliance-app.yaml new file mode 100644 index 000000000..32052c0e9 --- /dev/null +++ 
b/root-applications/ibm-mas-cluster-root/templates/040-cis-compliance-app.yaml @@ -0,0 +1,54 @@ +{{- if not (empty .Values.cis_compliance) }} +--- +# CIS Compliance +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: cis-compliance.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "040" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/040-cis-compliance + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + argo_namespace: "{{ .Values.argo.namespace }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: ciscomplianceapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true + - Validate=false +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-cluster-root/templates/041-cis-compliance-cleanup.yaml b/root-applications/ibm-mas-cluster-root/templates/041-cis-compliance-cleanup.yaml new file mode
100644 index 000000000..a13f5a532 --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/041-cis-compliance-cleanup.yaml @@ -0,0 +1,56 @@ +{{- if not (empty .Values.cis_compliance) }} +--- +# CIS Compliance Cleanup +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: cis-compliance-cleanup.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "041" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io/cleanup +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/041-cis-compliance-cleanup + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + argo_namespace: "{{ .Values.argo.namespace }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: ciscompliancecleanup + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true + - Validate=false +{{- end }} \ No newline at end of file diff --git
a/root-applications/ibm-mas-cluster-root/templates/050-nvidia-gpu-operator-app.yaml b/root-applications/ibm-mas-cluster-root/templates/050-nvidia-gpu-operator-app.yaml new file mode 100644 index 000000000..b9905f304 --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/050-nvidia-gpu-operator-app.yaml @@ -0,0 +1,58 @@ +{{- if not (empty .Values.nvidia_gpu_operator) }} +--- +# NVIDIA GPU Operator +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: nvidia-gpu.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "050" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/050-nvidia-gpu-operator + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + argo_namespace: "{{ .Values.argo.namespace }}" + nfd_namespace: "{{ .Values.nvidia_gpu_operator.nfd_namespace }}" + nfd_channel: "{{ .Values.nvidia_gpu_operator.nfd_channel }}" + gpu_namespace: "{{ .Values.nvidia_gpu_operator.gpu_namespace }}" + gpu_channel: "{{ .Values.nvidia_gpu_operator.gpu_channel }}" + gpu_driver_repository_path: "{{ .Values.nvidia_gpu_operator.gpu_driver_repository_path }}" + gpu_driver_version: "{{
.Values.nvidia_gpu_operator.gpu_driver_version }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: nvidiagpuapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-cluster-root/templates/060-ibm-db2u-app.yaml b/root-applications/ibm-mas-cluster-root/templates/060-ibm-db2u-app.yaml new file mode 100644 index 000000000..67a86e94d --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/060-ibm-db2u-app.yaml @@ -0,0 +1,55 @@ +{{- if not (empty .Values.ibm_db2u) }} +--- +# IBM Db2 Universal Operator +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: db2u.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "060" + healthCheckTimeout: "1800" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: cluster-applications/060-ibm-db2u + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + argo_namespace: "{{ .Values.argo.namespace }}" +
db2_namespace: "{{ .Values.ibm_db2u.db2_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_db2u.ibm_entitlement_key }}" + db2_channel: "{{ .Values.ibm_db2u.db2_channel }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: db2uapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-cluster-root/templates/099-instance-appset.yaml b/root-applications/ibm-mas-cluster-root/templates/099-instance-appset.yaml new file mode 100644 index 000000000..39cb0910d --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/templates/099-instance-appset.yaml @@ -0,0 +1,182 @@ + +--- +# IBM Maximo Application Suite Instance Application Set +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: instance-appset.{{ .Values.cluster.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + annotations: + argocd.argoproj.io/sync-wave: "099" +spec: + goTemplate: true + generators: + - merge: + mergeKeys: + # Would love to use ".path.path" here to merge the configs together + # but this is not possible currently with goTemplate: true - see https://github.com/argoproj/argo-cd/issues/12836 + # (and we really want goTemplate: true so we can dump the entire config file into HELM_VALUES using toPrettyJson) + # instead, we are forced to explicitly provide a merge key in all of our config files to make them line up + - 'merge-key' + generators: + # This is the "base" generator, it *must* be first in the list + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ 
.Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-instance-base.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-suite.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-sls.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-workspaces.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-suite-configs.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-db2u-databases.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-manage-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-iot-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-assist-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-visualinspection-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ 
.Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-optimizer-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-monitor-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-predict-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-health-install.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-mas-masapp-configs.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-cp4d.yaml" + - git: + repoURL: "{{ .Values.generator.repo_url }}" + revision: "{{ .Values.generator.revision }}" + files: + - path: "{{ .Values.account.id }}/{{ .Values.cluster.id }}/*/ibm-wsl.yaml" + template: + metadata: + name: "instance.{{ .Values.cluster.id }}.{{ `{{.instance.id}}` }}" + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ `{{.instance.id}}` }}' +{{- if .Values.custom_labels }} +{{ .Values.custom_labels | toYaml | indent 8 }} +{{- end }} + annotations: + healthCheckTimeout: "1800" + argocd.argoproj.io/sync-wave: "099" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + 
notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + spec: + project: "{{ .Values.argo.projects.rootapps }}" + source: + repoURL: "{{ .Values.source.repo_url }}" + targetRevision: "{{ .Values.source.revision }}" + path: root-applications/ibm-mas-instance-root + helm: + releaseName: instanceappset + values: "{{ `{{ toYaml . }}` }}" + parameters: + - name: "generator.repo_url" + value: "{{ .Values.generator.repo_url }}" + - name: "generator.revision" + value: "{{ .Values.generator.revision }}" + - name: "source.revision" + value: "{{ .Values.source.revision }}" + - name: "source.repo_url" + value: "{{ .Values.source.repo_url }}" + - name: argo.namespace + value: "{{ .Values.argo.namespace }}" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + - name: "notifications.slack_channel_id" + value: "{{ .Values.notifications.slack_channel_id }}" + {{- end }} + - name: argo.projects.rootapps + value: "{{ .Values.argo.projects.rootapps }}" + - name: argo.projects.apps + value: "{{ .Values.argo.projects.apps }}" + - name: avp.name + value: "{{ .Values.avp.name }}" + - name: avp.secret + value: "{{ .Values.avp.secret }}" + - name: avp.values_varname + value: "{{ .Values.avp.values_varname }}" + destination: + server: 'https://kubernetes.default.svc' + namespace: {{ .Values.argo.namespace }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true + ignoreDifferences: + - group: '*' + kind: ServiceAccount + jsonPointers: + - /imagePullSecrets + - group: 'marketplace.redhat.com/v1alpha1' + kind: MarketplaceConfig + jsonPointers: + - /spec diff --git a/root-applications/ibm-mas-cluster-root/values.yaml b/root-applications/ibm-mas-cluster-root/values.yaml new file mode 100644 index 000000000..b5c3642f6 --- /dev/null +++ b/root-applications/ibm-mas-cluster-root/values.yaml @@ -0,0 +1,28 @@ +--- + + +avp: + 
name: "argocd-vault-plugin-helm" + secret: "" + values_varname: "HELM_VALUES" + + + +source: + repo_url: "https://github.com/ibm-mas/gitops" + revision: "poc" + +# Customers will definitely need to customise this (our gitops-envs/mas-dev repos are private), +# So deliberately not specifying defaults here; we want charts to refuse to render if these are not specified +# Both of these correspond to required arguments of the gitops-bootstrap CLI function (--github-url and --github-revision) +# generator: +# repo_url: "" +# revision: "" + +# These defaults align with the ArgoCD worker setup by gitops-bootstrap +# (openshift-gitops with a single ArgoCD project "mas") +argo: + namespace: "openshift-gitops" + projects: + rootapps: "mas" + apps: "mas" diff --git a/root-applications/ibm-mas-instance-root/Chart.yaml b/root-applications/ibm-mas-instance-root/Chart.yaml new file mode 100644 index 000000000..a7310f9f8 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: ibm-mas-instance-root +description: IBM MAS instance root +type: application +version: 1.0.0 diff --git a/root-applications/ibm-mas-instance-root/README.md b/root-applications/ibm-mas-instance-root/README.md new file mode 100644 index 000000000..60b68a3c7 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/README.md @@ -0,0 +1,3 @@ +IBM MAS Instance Root Application +=============================================================================== +Installs various ArgoCD Applications for managing instance-level MAS dependencies (e.g. SLS, DB2 Databases), MAS Core and MAS Applications (e.g. Manage, Monitor, etc) on the target cluster.
\ No newline at end of file diff --git a/root-applications/ibm-mas-instance-root/templates/000-ibm-sync-resources.yaml b/root-applications/ibm-mas-instance-root/templates/000-ibm-sync-resources.yaml new file mode 100644 index 000000000..0c64e095f --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/000-ibm-sync-resources.yaml @@ -0,0 +1,101 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: syncres.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "000" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + ignoreDifferences: + - group: '*' + kind: ServiceAccount + jsonPointers: + - /imagePullSecrets + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/000-ibm-sync-resources + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + account_id: "{{ .Values.account.id }}" + region_id: "{{ .Values.region.id }}" + cluster_id: "{{ .Values.cluster.id }}" + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + sm_aws_access_key_id: "{{ .Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ .Values.sm.aws_secret_access_key }}" + sm_aws_region: "{{ .Values.region.id }}" + 
{{- if not (empty .Values.ibm_sls) }} + {{- if eq .Values.ibm_sls.mongodb_provider "aws" }} + docdb: + master_username: "{{ .Values.ibm_sls.docdb_master_username }}" + master_password: "{{ .Values.ibm_sls.docdb_master_password }}" + instance_password: "{{ .Values.ibm_sls.sls_mongo_password }}" + master_info: {{ .Values.ibm_sls.docdb_master_info }} + {{- end }} + {{- end }} + + + mas_manual_cert_mgmt: "{{ .Values.ibm_mas_suite.mas_manual_cert_mgmt }}" + dns_provider: "{{ .Values.ibm_mas_suite.dns_provider }}" + mas_workspace_id: "{{ .Values.ibm_mas_suite.mas_workspace_id }}" + mas_config_dir: "{{ .Values.ibm_mas_suite.mas_config_dir }}" + mas_domain: "{{ .Values.ibm_mas_suite.mas_domain }}" + ocp_cluster_domain: "{{ .Values.ibm_mas_suite.ocp_cluster_domain }}" + cis_apikey: "{{ .Values.ibm_mas_suite.cis_apikey }}" + cis_crn: "{{ .Values.ibm_mas_suite.cis_crn }}" + + cis_email: "{{ .Values.ibm_mas_suite.cis_email }}" + cis_subdomain: "{{ .Values.ibm_mas_suite.cis_subdomain }}" + cis_mas_domain: "{{ .Values.ibm_mas_suite.cis_mas_domain }}" + cis_enhanced_security: "{{ .Values.ibm_mas_suite.cis_enhanced_security }}" + cis_waf: "{{ .Values.ibm_mas_suite.cis_waf }}" + cis_proxy: "{{ .Values.ibm_mas_suite.cis_proxy }}" + cis_service_name: "{{ .Values.ibm_mas_suite.cis_service_name }}" + update_dns_entries: "{{ .Values.ibm_mas_suite.update_dns_entries }}" + delete_wildcards: "{{ .Values.ibm_mas_suite.delete_wildcards }}" + override_edge_certs: "{{ .Values.ibm_mas_suite.override_edge_certs }}" + + + {{- if .Values.ibm_mas_suite.manual_certs }} + manual_certs: {{ .Values.ibm_mas_suite.manual_certs | toYaml | nindent 14 }} + {{- end }} + + + + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + + - name: ARGOCD_APP_NAME + value: syncres + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + 
syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true diff --git a/root-applications/ibm-mas-instance-root/templates/010-ibm-sync-jobs.yaml b/root-applications/ibm-mas-instance-root/templates/010-ibm-sync-jobs.yaml new file mode 100644 index 000000000..8b47716e7 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/010-ibm-sync-jobs.yaml @@ -0,0 +1,105 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: syncjobs.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "010" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io/cleanup +spec: + ignoreDifferences: + - group: '*' + kind: ServiceAccount + jsonPointers: + - /imagePullSecrets + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/010-ibm-sync-jobs + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + account_id: "{{ .Values.account.id }}" + region_id: "{{ .Values.region.id }}" + cluster_id: "{{ .Values.cluster.id }}" + instance_id: "{{ .Values.instance.id }}" + sm_aws_access_key_id: "{{ .Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ 
.Values.sm.aws_secret_access_key }}" + sm_aws_region: "{{ .Values.region.id }}" + {{- if not (empty .Values.ibm_cp4d) }} + cpd_product_version: "{{ .Values.ibm_cp4d.cpd_product_version }}" + {{- end }} + {{- if not (empty .Values.ibm_sls) }} + {{- if eq .Values.ibm_sls.mongodb_provider "aws" }} + docdb: + master_username: "{{ .Values.ibm_sls.docdb_master_username }}" + master_password: "{{ .Values.ibm_sls.docdb_master_password }}" + instance_password: "{{ .Values.ibm_sls.sls_mongo_password }}" + master_info: {{ .Values.ibm_sls.docdb_master_info }} + {{- end }} + {{- end }} + + + mas_manual_cert_mgmt: "{{ .Values.ibm_mas_suite.mas_manual_cert_mgmt }}" + dns_provider: "{{ .Values.ibm_mas_suite.dns_provider }}" + mas_workspace_id: "{{ .Values.ibm_mas_suite.mas_workspace_id }}" + mas_config_dir: "{{ .Values.ibm_mas_suite.mas_config_dir }}" + mas_domain: "{{ .Values.ibm_mas_suite.mas_domain }}" + ocp_cluster_domain: "{{ .Values.ibm_mas_suite.ocp_cluster_domain }}" + cis_apikey: "{{ .Values.ibm_mas_suite.cis_apikey }}" + cis_crn: "{{ .Values.ibm_mas_suite.cis_crn }}" + + cis_email: "{{ .Values.ibm_mas_suite.cis_email }}" + cis_subdomain: "{{ .Values.ibm_mas_suite.cis_subdomain }}" + cis_mas_domain: "{{ .Values.ibm_mas_suite.cis_mas_domain }}" + cis_enhanced_security: "{{ .Values.ibm_mas_suite.cis_enhanced_security }}" + cis_waf: "{{ .Values.ibm_mas_suite.cis_waf }}" + cis_proxy: "{{ .Values.ibm_mas_suite.cis_proxy }}" + cis_service_name: "{{ .Values.ibm_mas_suite.cis_service_name }}" + update_dns_entries: "{{ .Values.ibm_mas_suite.update_dns_entries }}" + delete_wildcards: "{{ .Values.ibm_mas_suite.delete_wildcards }}" + override_edge_certs: "{{ .Values.ibm_mas_suite.override_edge_certs }}" + + + {{- if .Values.ibm_mas_suite.manual_certs }} + manual_certs: {{ .Values.ibm_mas_suite.manual_certs | toYaml | nindent 14 }} + {{- end }} + + + + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + + - name: 
ARGOCD_APP_NAME + value: syncjobs + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true diff --git a/root-applications/ibm-mas-instance-root/templates/080-ibm-cp4d-app.yaml b/root-applications/ibm-mas-instance-root/templates/080-ibm-cp4d-app.yaml new file mode 100644 index 000000000..45d2c8847 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/080-ibm-cp4d-app.yaml @@ -0,0 +1,76 @@ +{{- if not (empty .Values.ibm_cp4d) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: cp4d.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "080" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + ignoreDifferences: + - group: 'operator.ibm.com' + kind: IBMLicensing + jsonPointers: + - /spec/version + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/080-ibm-cp4d + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + argo_namespace: "{{ .Values.argo.namespace }}" + instance_id: "{{ .Values.instance.id }}" 
+ ibm_entitlement_key: "{{ .Values.ibm_cp4d.ibm_entitlement_key }}" + cpd_operators_namespace: "{{ .Values.ibm_cp4d.cpd_operators_namespace }}" + cpd_instance_namespace: "{{ .Values.ibm_cp4d.cpd_instance_namespace }}" + cpd_cs_control_namespace: "{{ .Values.ibm_cp4d.cpd_cs_control_namespace }}" + cpd_admin_login_sa: "{{ .Values.ibm_cp4d.cpd_admin_login_sa }}" + namespace_scope_channel: "{{ .Values.ibm_cp4d.namespace_scope_channel }}" + cpd_ibm_licensing_channel: "{{ .Values.ibm_cp4d.cpd_ibm_licensing_channel }}" + cpd_ibm_licensing_version: "{{ .Values.ibm_cp4d.cpd_ibm_licensing_version }}" + cpfs_channel: "{{ .Values.ibm_cp4d.cpfs_channel }}" + cpfs_size: "{{ .Values.ibm_cp4d.cpfs_size }}" + cpd_scale_config: "{{ .Values.ibm_cp4d.cpd_scale_config }}" + cpd_platform_channel: "{{ .Values.ibm_cp4d.cpd_platform_channel }}" + cpd_platform_cr_name: "{{ .Values.ibm_cp4d.cpd_platform_cr_name }}" + cpd_product_version: "{{ .Values.ibm_cp4d.cpd_product_version }}" + cpd_iam_integration: "{{ .Values.ibm_cp4d.cpd_iam_integration }}" + cpd_primary_storage_class: "{{ .Values.ibm_cp4d.cpd_primary_storage_class }}" + cpd_metadata_storage_class: "{{ .Values.ibm_cp4d.cpd_metadata_storage_class }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: cp4dapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-instance-root/templates/090-ibm-wsl-app.yaml b/root-applications/ibm-mas-instance-root/templates/090-ibm-wsl-app.yaml new file mode 100644 index 000000000..28ebc3615 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/090-ibm-wsl-app.yaml @@ -0,0 +1,78 @@ +{{- if not (empty .Values.ibm_wsl) }} +--- 
+# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: wsl.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "090" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + ignoreDifferences: + - group: '*' + kind: ServiceAccount + jsonPointers: + - /imagePullSecrets + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/090-ibm-wsl + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + account_id: "{{ .Values.account.id }}" + region_id: "{{ .Values.region.id }}" + cluster_id: "{{ .Values.cluster.id }}" + instance_id: "{{ .Values.instance.id }}" + sm_aws_access_key_id: "{{ .Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ .Values.sm.aws_secret_access_key }}" + cpd_operators_namespace: "{{ .Values.ibm_cp4d.cpd_operators_namespace }}" + cpd_instance_namespace: "{{ .Values.ibm_cp4d.cpd_instance_namespace }}" + cpd_service_storage_class: "{{ .Values.ibm_wsl.cpd_service_storage_class }}" + cpd_service_block_storage_class: "{{ .Values.ibm_wsl.cpd_service_block_storage_class }}" + cpd_service_scale_config: "{{ .Values.ibm_wsl.cpd_service_scale_config }}" + wsl_version: "{{ .Values.ibm_wsl.wsl_version }}" 
+ wsl_channel: "{{ .Values.ibm_wsl.wsl_channel }}" + ccs_channel: "{{ .Values.ibm_wsl.ccs_channel }}" + ccs_version: "{{ .Values.ibm_wsl.ccs_version }}" + datarefinery_channel: "{{ .Values.ibm_wsl.datarefinery_channel }}" + datarefinery_version: "{{ .Values.ibm_wsl.datarefinery_version }}" + ws_runtimes_channel: "{{ .Values.ibm_wsl.ws_runtimes_channel }}" + ws_runtimes_version: "{{ .Values.ibm_wsl.ws_runtimes_version }}" + opencontent_rabbitmq_channel: "{{ .Values.ibm_wsl.opencontent_rabbitmq_channel }}" + opencontent_elasticsearch_channel: "{{ .Values.ibm_wsl.opencontent_elasticsearch_channel }}" + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: cpdwslapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml b/root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml new file mode 100644 index 000000000..c2bebc16e --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml @@ -0,0 +1,73 @@ +{{- if not (empty .Values.ibm_sls) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: sls.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "100" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ 
.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + ignoreDifferences: + - group: '*' + kind: ServiceAccount + jsonPointers: + - /imagePullSecrets + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/100-ibm-sls + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + account_id: "{{ .Values.account.id }}" + region_id: "{{ .Values.region.id }}" + cluster_id: "{{ .Values.cluster.id }}" + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + sm_aws_access_key_id: "{{ .Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ .Values.sm.aws_secret_access_key }}" + run_sync_hooks: {{.Values.ibm_sls.run_sync_hooks }} + sls_channel: "{{ .Values.ibm_sls.sls_channel }}" + ibm_entitlement_key: "{{ .Values.ibm_sls.ibm_entitlement_key }}" + sls_mongo_secret_name: "{{ .Values.ibm_sls.sls_mongo_secret_name }}" + sls_mongo_username: "{{ .Values.ibm_sls.sls_mongo_username }}" + sls_mongo_password: "{{ .Values.ibm_sls.sls_mongo_password }}" + sls_entitlement_file: "{{ .Values.ibm_sls.sls_entitlement_file }}" + icr_cp_open: "{{ .Values.ibm_sls.icr_cp_open }}" + mongo_spec: {{ .Values.ibm_sls.mongo_spec | toYaml | nindent 14}} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: slsapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true 
+{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/120-db2-databases-app.yaml b/root-applications/ibm-mas-instance-root/templates/120-db2-databases-app.yaml new file mode 100644 index 000000000..52ed41331 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/120-db2-databases-app.yaml @@ -0,0 +1,69 @@ +{{- /* +When inside the range loop below, make sure you prefix any references to chart values NOT under .Values.ibm_db2u_databases with $. +For example: {{ $.Values.account.id }} (instead of {{ .Values.account.id }} ) +*/}} + +{{- range $i, $value := .Values.ibm_db2u_databases }} +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "db2-db.{{ $.Values.cluster.id }}.{{ $.Values.instance.id }}.{{ $value.mas_application_id }}" + namespace: {{ $.Values.argo.namespace }} + labels: + environment: '{{ $.Values.account.id }}' + region: '{{ $.Values.region.id }}' + cluster: '{{ $.Values.cluster.id }}' + instance: '{{ $.Values.instance.id }}' + appId: '{{ $value.mas_application_id }}' + annotations: + argocd.argoproj.io/sync-wave: "120" + {{- if and $.Values.notifications $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ $.Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ $.Values.argo.projects.apps }}" + destination: + server: {{ $.Values.cluster.url }} + namespace: default + source: + repoURL: "{{ $.Values.source.repo_url }}" + path: instance-applications/120-ibm-db2u-database + targetRevision: "{{ $.Values.source.revision }}" + plugin: + name: {{ $.Values.avp.name }} + env: + - name: {{ $.Values.avp.values_varname }} + value: | + account_id: "{{ $.Values.account.id }}" + region_id: "{{ $.Values.region.id }}" + cluster_id: "{{ $.Values.cluster.id }}" + 
instance_id: "{{ $.Values.instance.id }}" + sm_aws_access_key_id: "{{ $.Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ $.Values.sm.aws_secret_access_key }}" + {{- if $.Values.custom_labels }} + custom_labels: {{ $.Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + {{ $value | toYaml | nindent 12 }} + - name: ARGOCD_APP_NAME + value: "db2dbapp-{{ $value.mas_application_id }}" + {{- if not (empty $.Values.avp.secret) }} + - name: AVP_SECRET + value: {{ $.Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true + ignoreDifferences: + - group: 'db2u.databases.ibm.com' + kind: Db2uCluster + jsonPointers: + - /spec/environment/database/ssl/secretName +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml new file mode 100644 index 000000000..16d4eeef6 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml @@ -0,0 +1,134 @@ +{{- if not (empty .Values.ibm_mas_suite) }} + +{{ $app_name := printf "suite.%s.%s" .Values.cluster.id .Values.instance.id }} +{{ $app_dest_ns := printf "mas-%s-core" .Values.instance.id }} + +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: {{ $app_name }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "130" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ 
.Values.notifications.slack_channel_id }} + {{- end }} + argocd.argoproj.io/sync-options: PruneLast=true + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: {{ $app_dest_ns }} + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/130-ibm-mas-suite + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + account_id: "{{ .Values.account.id }}" + region_id: "{{ .Values.region.id }}" + cluster_id: "{{ .Values.cluster.id }}" + instance_id: "{{ .Values.instance.id }}" + sm_aws_access_key_id: "{{ .Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ .Values.sm.aws_secret_access_key }}" + sm_aws_region: "{{ .Values.region.id }}" + mas_workspace_id: "{{ .Values.ibm_mas_suite.mas_workspace_id }}" + mas_domain: "{{ .Values.ibm_mas_suite.mas_domain }}" + mas_config_dir: "{{ .Values.ibm_mas_suite.mas_config_dir }}" + mas_channel: "{{ .Values.ibm_mas_suite.mas_channel }}" + ibm_entitlement_key: "{{ .Values.ibm_mas_suite.ibm_entitlement_key }}" + domain: "{{ .Values.ibm_mas_suite.domain }}" + cert_manager_namespace: "{{ .Values.ibm_mas_suite.cert_manager_namespace }}" + mas_manual_cert_mgmt: "{{.Values.ibm_mas_suite.mas_manual_cert_mgmt }}" + dns_provider: "{{ .Values.ibm_mas_suite.dns_provider }}" + icr_cp: "{{ .Values.ibm_mas_suite.icr_cp }}" + icr_cp_open: "{{ .Values.ibm_mas_suite.icr_cp_open }}" + + {{- if .Values.ibm_mas_suite.mas_annotations }} + mas_annotations: {{ .Values.ibm_mas_suite.mas_annotations | toYaml | nindent 14 }} + {{- end }} + + {{- if .Values.ibm_mas_suite.mas_labels }} + mas_labels: {{ .Values.ibm_mas_suite.mas_labels | toYaml | nindent 14 }} + {{- end }} + + {{- if .Values.ibm_mas_suite.mas_image_tags }} + mas_image_tags: {{ .Values.ibm_mas_suite.mas_image_tags | toYaml | nindent 14 }} + {{- end }} + 
+ {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + + {{- if and (eq .Values.ibm_mas_suite.dns_provider "cis") (eq .Values.ibm_mas_suite.mas_manual_cert_mgmt "False") }} + cis_apikey: "{{ .Values.ibm_mas_suite.cis_apikey }}" + ocp_cluster_domain: "{{ .Values.ibm_mas_suite.ocp_cluster_domain }}" + cis_email: "{{ .Values.ibm_mas_suite.cis_email }}" + cis_crn: "{{ .Values.ibm_mas_suite.cis_crn }}" + cis_subdomain: "{{ .Values.ibm_mas_suite.cis_subdomain }}" + cis_mas_domain: "{{ .Values.ibm_mas_suite.cis_mas_domain }}" + cis_enhanced_security: "{{ .Values.ibm_mas_suite.cis_enhanced_security }}" + cis_waf: "{{ .Values.ibm_mas_suite.cis_waf }}" + cis_proxy: "{{ .Values.ibm_mas_suite.cis_proxy }}" + cis_service_name: "{{ .Values.ibm_mas_suite.cis_service_name }}" + update_dns_entries: "{{ .Values.ibm_mas_suite.update_dns_entries }}" + delete_wildcards: "{{ .Values.ibm_mas_suite.delete_wildcards }}" + override_edge_certs: "{{ .Values.ibm_mas_suite.override_edge_certs }}" + {{- end }} + + {{- if eq .Values.ibm_mas_suite.mas_manual_cert_mgmt "True" }} + + {{- if .Values.ibm_mas_suite.ca_cert }} + ca_cert: | + {{ .Values.ibm_mas_suite.ca_cert }} + {{- end }} + {{- if .Values.ibm_mas_suite.tls_cert }} + tls_cert: | + {{ .Values.ibm_mas_suite.tls_cert }} + {{- end }} + {{- if .Values.ibm_mas_suite.tls_key }} + tls_key: | + {{ .Values.ibm_mas_suite.tls_key }} + {{- end }} + + {{- end }} + - name: ARGOCD_APP_NAME + value: suiteapp + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - RespectIgnoreDifferences=true + managedNamespaceMetadata: + labels: + argocd.argoproj.io/instance: {{ $app_name }} + argocd.argoproj.io/managed-by: "{{ .Values.argo.namespace }}" + annotations: + argocd.argoproj.io/tracking-id: >- + {{ $app_name }}:/Namespace:{{ $app_dest_ns }}/{{ 
$app_dest_ns }} + ignoreDifferences: + - group: 'cert-manager.io' + kind: ClusterIssuer + jsonPointers: + - /spec/acme/preferredChain + - /spec/duration +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml new file mode 100644 index 000000000..e83248718 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml @@ -0,0 +1,65 @@ +{{- /* +When inside the range loop below, make sure you prefix any references to chart values NOT under .Values.ibm_mas_suite_configs with $. +For example: {{ $.Values.account.id }} (instead of {{ .Values.account.id }} ) +*/}} + +{{- range $i, $value := .Values.ibm_mas_suite_configs }} +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "{{ $value.mas_config_name }}.{{ $.Values.cluster.id }}" + namespace: {{ $.Values.argo.namespace }} + labels: + environment: '{{ $.Values.account.id }}' + region: '{{ $.Values.region.id }}' + cluster: '{{ $.Values.cluster.id }}' + instance: '{{ $.Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "130" + {{- if and $.Values.notifications $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ $.Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io + - post-delete-finalizer.argocd.argoproj.io/cleanup +spec: + project: "{{ $.Values.argo.projects.apps }}" + destination: + server: {{ $.Values.cluster.url }} + namespace: 
default + source: + repoURL: "{{ $.Values.source.repo_url }}" + path: instance-applications/130-{{ $value.mas_config_chart }} + targetRevision: "{{ $.Values.source.revision }}" + plugin: + name: {{ $.Values.avp.name }} + env: + - name: {{ $.Values.avp.values_varname }} + value: | + account_id: "{{ $.Values.account.id }}" + region_id: "{{ $.Values.region.id }}" + cluster_id: "{{ $.Values.cluster.id }}" + instance_id: "{{ $.Values.instance.id }}" + sm_aws_access_key_id: "{{ $.Values.sm.aws_access_key_id }}" + sm_aws_secret_access_key: "{{ $.Values.sm.aws_secret_access_key }}" + {{- if $.Values.custom_labels }} + custom_labels: {{ $.Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + {{ $value | toYaml | nindent 12 }} + - name: ARGOCD_APP_NAME + value: {{ $value.mas_config_name }} + {{- if not (empty $.Values.avp.secret) }} + - name: AVP_SECRET + value: {{ $.Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} \ No newline at end of file diff --git a/root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml b/root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml new file mode 100644 index 000000000..0c67bbd33 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml @@ -0,0 +1,60 @@ +{{- /* +When inside the range loop below, make sure you prefix any references to chart values NOT under .Values.ibm_db2u_databases with $. 
+For example: {{ $.Values.account.id }} (instead of {{ .Values.account.id }} ) +*/}} + +{{- range $i, $value := .Values.ibm_mas_workspaces }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: {{ $value.mas_workspace_id }}.suite.{{ $.Values.cluster.id }}.{{ $.Values.instance.id }} + namespace: {{ $.Values.argo.namespace }} + labels: + environment: '{{ $.Values.account.id }}' + region: '{{ $.Values.region.id }}' + cluster: '{{ $.Values.cluster.id }}' + instance: '{{ $.Values.instance.id }}' + annotations: + argocd.argoproj.io/sync-wave: "200" + {{- if and $.Values.notifications $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ $.Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ $.Values.argo.projects.apps }}" + destination: + server: {{ $.Values.cluster.url }} + namespace: default + source: + repoURL: "{{ $.Values.source.repo_url }}" + path: instance-applications/220-ibm-mas-workspace + targetRevision: "{{ $.Values.source.revision }}" + plugin: + name: {{ $.Values.avp.name }} + env: + - name: {{ $.Values.avp.values_varname }} + value: | + instance_id: "{{ $.Values.instance.id }}" + mas_workspace_id: "{{ $value.mas_workspace_id }}" + mas_workspace_name: "{{ $value.mas_workspace_name }}" + {{- if $.Values.custom_labels }} + custom_labels: {{ $.Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: "workspaceapp-{{ $value.mas_workspace_id }}" + {{- if not (empty $.Values.avp.secret) }} + - name: AVP_SECRET + value: {{ $.Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git 
a/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-assist-install.yaml b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-assist-install.yaml new file mode 100644 index 000000000..7c1e56767 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-assist-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_assist_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: assist.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + masapp: 'assist' + annotations: + argocd.argoproj.io/sync-wave: "500" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ .Values.ibm_suite_app_assist_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_assist_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_assist_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ 
.Values.ibm_suite_app_assist_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_assist_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_assist_install.mas_app_catalog_source }}" + mas_app_channel: "{{ .Values.ibm_suite_app_assist_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_assist_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_assist_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_assist_install.mas_manual_cert_mgmt }}" + {{- if eq .Values.ibm_suite_app_assist_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_assist_install.public_tls_secret_name }}" + ca_cert: | + {{ .Values.ibm_suite_app_assist_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_assist_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_assist_install.tls_key }} + {{- end }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_assist_install.mas_app_spec | toYaml | nindent 14 }} + - name: ARGOCD_APP_NAME + value: "assist-install" + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-iot-install.yaml b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-iot-install.yaml new file mode 100644 index 000000000..8c6e6cbbd --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-iot-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_iot_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: iot.{{ 
.Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + masapp: 'iot' + annotations: + argocd.argoproj.io/sync-wave: "500" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ .Values.ibm_suite_app_iot_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_iot_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_iot_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_suite_app_iot_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_iot_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_iot_install.mas_app_catalog_source }}" + mas_app_channel: "{{ .Values.ibm_suite_app_iot_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_iot_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_iot_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_iot_install.mas_manual_cert_mgmt }}" + {{- if eq 
.Values.ibm_suite_app_iot_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_iot_install.public_tls_secret_name }}" + ca_cert: | + {{ .Values.ibm_suite_app_iot_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_iot_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_iot_install.tls_key }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_iot_install.mas_app_spec | toYaml | nindent 14 }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: "iot-install" + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml new file mode 100644 index 000000000..952618958 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_manage_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: manage.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + masapp: 'manage' + annotations: + argocd.argoproj.io/sync-wave: "500" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ 
.Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ .Values.ibm_suite_app_manage_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_manage_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_manage_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_suite_app_manage_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_manage_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_manage_install.mas_app_catalog_source }}" + mas_app_channel: "{{ .Values.ibm_suite_app_manage_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_manage_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_manage_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_manage_install.mas_manual_cert_mgmt }}" + {{- if eq .Values.ibm_suite_app_manage_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_manage_install.public_tls_secret_name }}" + ca_cert: | + {{ .Values.ibm_suite_app_manage_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_manage_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_manage_install.tls_key }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_manage_install.mas_app_spec | toYaml | nindent 14 }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- 
end }} + - name: ARGOCD_APP_NAME + value: "manage-install" + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-visualinspection-install.yaml b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-visualinspection-install.yaml new file mode 100644 index 000000000..638c49d3d --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-visualinspection-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_visualinspection_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: visualinspection.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + masapp: 'visualinspection' + annotations: + argocd.argoproj.io/sync-wave: "500" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname 
}} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ .Values.ibm_suite_app_visualinspection_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_visualinspection_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_visualinspection_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_suite_app_visualinspection_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_visualinspection_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_visualinspection_install.mas_app_catalog_source }}" + mas_app_channel: "{{ .Values.ibm_suite_app_visualinspection_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_visualinspection_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_visualinspection_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_visualinspection_install.mas_manual_cert_mgmt }}" + {{- if eq .Values.ibm_suite_app_visualinspection_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_visualinspection_install.public_tls_secret_name }}" + ca_cert: | + {{ .Values.ibm_suite_app_visualinspection_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_visualinspection_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_visualinspection_install.tls_key }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_visualinspection_install.mas_app_spec | toYaml | nindent 14 }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: "visualinspection-install" + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff 
--git a/root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml b/root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml new file mode 100644 index 000000000..f86493633 --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml @@ -0,0 +1,108 @@ +{{- /* +When inside the range loop below, make sure you prefix any references to chart values NOT under .Values.ibm_mas_masapp_configs with $. +For example: {{ $.Values.account.id }} (instead of {{ .Values.account.id }} ) +*/}} + +{{- range $i, $value := .Values.ibm_mas_masapp_configs }} +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "{{ $value.mas_workspace_id }}.{{ $value.mas_app_id }}.{{ $.Values.cluster.id }}.{{ $.Values.instance.id }}" + namespace: {{ $.Values.argo.namespace }} + annotations: + {{- if eq $value.mas_app_id "assist" }} + argocd.argoproj.io/sync-wave: "510" + {{- else if eq $value.mas_app_id "iot" }} + argocd.argoproj.io/sync-wave: "510" + {{- else if eq $value.mas_app_id "manage" }} + argocd.argoproj.io/sync-wave: "510" + {{- else if eq $value.mas_app_id "visualinspection" }} + argocd.argoproj.io/sync-wave: "510" + {{- else if eq $value.mas_app_id "health" }} + argocd.argoproj.io/sync-wave: "530" + {{- else if eq $value.mas_app_id "monitor" }} + argocd.argoproj.io/sync-wave: "530" + {{- else if eq $value.mas_app_id "optimizer" }} + argocd.argoproj.io/sync-wave: "530" + {{- else if eq $value.mas_app_id "predict" }} + argocd.argoproj.io/sync-wave: "550" + {{- end }} + {{- if and $.Values.notifications $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ $.Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ $.Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io + labels: + environment: '{{ 
$.Values.account.id }}' + region: '{{ $.Values.region.id }}' + cluster: '{{ $.Values.cluster.id }}' + instance: '{{ $.Values.instance.id }}' +spec: + project: "{{ $.Values.argo.projects.apps }}" + destination: + server: {{ $.Values.cluster.url }} + namespace: default + source: + repoURL: "{{ $.Values.source.repo_url }}" + path: instance-applications/510-550-ibm-mas-suite-app-config + targetRevision: "{{ $.Values.source.revision }}" + plugin: + name: {{ $.Values.avp.name }} + env: + - name: {{ $.Values.avp.values_varname }} + value: | + instance_id: "{{ $.Values.instance.id }}" + mas_app_id: "{{ $value.mas_app_id }}" + mas_workspace_id: "{{ $value.mas_workspace_id }}" + mas_app_namespace: "{{ $value.mas_app_namespace }}" + mas_app_ws_apiversion: "{{ $value.mas_app_ws_apiversion }}" + mas_app_ws_kind: "{{ $value.mas_app_ws_kind }}" + mas_appws_spec: {{ $value.mas_appws_spec | toYaml | nindent 14 }} + + mas_manual_cert_mgmt: "{{ $value.mas_manual_cert_mgmt }}" + {{- if eq $value.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ $value.public_tls_secret_name }}" + ca_cert: | + {{ $value.ca_cert }} + tls_cert: | + {{ $value.tls_cert }} + tls_key: | + {{ $value.tls_key }} + {{- end }} + + {{- if (or (eq $value.mas_app_id "manage") (eq $value.mas_app_id "health")) }} + mas_app_server_bundles_combined_add_server_config: {{ $value.mas_app_server_bundles_combined_add_server_config | toYaml | nindent 14 }} + + customization_archive_secret_names: {{ $value.customization_archive_secret_names | toYaml | nindent 14 }} + + {{- end }} + + {{- if $.Values.custom_labels }} + custom_labels: {{ $.Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + + - name: ARGOCD_APP_NAME + value: "ma-cfg-{{ $value.mas_app_id }}" + {{- if not (empty $.Values.avp.secret) }} + - name: AVP_SECRET + value: {{ $.Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true + 
ignoreDifferences: + - group: '*' + kind: ServiceAccount + jsonPointers: + - /imagePullSecrets + - group: 'marketplace.redhat.com/v1alpha1' + kind: MarketplaceConfig + jsonPointers: + - /spec +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml new file mode 100644 index 000000000..b3a2f86bb --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_health_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: health.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + masapp: 'health' + annotations: + argocd.argoproj.io/sync-wave: "520" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ 
.Values.ibm_suite_app_health_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_health_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_health_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_suite_app_health_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_health_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_health_install.mas_app_catalog_source }}" + mas_app_channel: "{{ .Values.ibm_suite_app_health_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_health_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_health_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_health_install.mas_manual_cert_mgmt }}" + {{- if eq .Values.ibm_suite_app_health_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_health_install.public_tls_secret_name }}" + ca_cert: | + {{ .Values.ibm_suite_app_health_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_health_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_health_install.tls_key }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_health_install.mas_app_spec | toYaml | nindent 14 }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: "health-install" + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml new file mode 100644 index 000000000..d8fa74a9b --- /dev/null +++ 
b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_monitor_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: monitor.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + masapp: 'monitor' + annotations: + argocd.argoproj.io/sync-wave: "520" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ .Values.ibm_suite_app_monitor_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_monitor_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_monitor_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_suite_app_monitor_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_monitor_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_monitor_install.mas_app_catalog_source }}" + mas_app_channel: 
"{{ .Values.ibm_suite_app_monitor_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_monitor_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_monitor_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_monitor_install.mas_manual_cert_mgmt }}" + {{- if eq .Values.ibm_suite_app_monitor_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_monitor_install.public_tls_secret_name }}" + ca_cert: | + {{ .Values.ibm_suite_app_monitor_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_monitor_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_monitor_install.tls_key }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_monitor_install.mas_app_spec | toYaml | nindent 14 }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: "monitor-install" + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml new file mode 100644 index 000000000..4c9e748cc --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_optimizer_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: optimizer.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: 
'{{ .Values.instance.id }}' + masapp: 'optimizer' + annotations: + argocd.argoproj.io/sync-wave: "520" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ .Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ .Values.ibm_suite_app_optimizer_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_optimizer_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_optimizer_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_suite_app_optimizer_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_optimizer_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_optimizer_install.mas_app_catalog_source }}" + mas_app_channel: "{{ .Values.ibm_suite_app_optimizer_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_optimizer_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_optimizer_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_optimizer_install.mas_manual_cert_mgmt }}" + {{- if eq .Values.ibm_suite_app_optimizer_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_optimizer_install.public_tls_secret_name }}" + ca_cert: | + 
{{ .Values.ibm_suite_app_optimizer_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_optimizer_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_optimizer_install.tls_key }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_optimizer_install.mas_app_spec | toYaml | nindent 14 }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: "optimizer-install" + {{- if not (empty .Values.avp.secret) }} + - name: AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml b/root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml new file mode 100644 index 000000000..324cb011e --- /dev/null +++ b/root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml @@ -0,0 +1,75 @@ +{{- if not (empty .Values.ibm_suite_app_predict_install) }} +--- +# IBM Maximo Operator Catalog +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: predict.{{ .Values.cluster.id }}.{{ .Values.instance.id }} + namespace: {{ .Values.argo.namespace }} + labels: + environment: '{{ .Values.account.id }}' + region: '{{ .Values.region.id }}' + cluster: '{{ .Values.cluster.id }}' + instance: '{{ .Values.instance.id }}' + masapp: 'predict' + annotations: + argocd.argoproj.io/sync-wave: "540" + {{- if and .Values.notifications .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-failed.workspace1: {{ .Values.notifications.slack_channel_id }} + notifications.argoproj.io/subscribe.on-sync-succeeded.workspace1: {{ .Values.notifications.slack_channel_id }} + {{- end }} + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: "{{ 
.Values.argo.projects.apps }}" + destination: + server: {{ .Values.cluster.url }} + namespace: default + source: + repoURL: "{{ .Values.source.repo_url }}" + path: instance-applications/500-540-ibm-mas-suite-app-install + targetRevision: "{{ .Values.source.revision }}" + plugin: + name: {{ .Values.avp.name }} + env: + - name: {{ .Values.avp.values_varname }} + value: | + instance_id: "{{ .Values.instance.id }}" + argo_namespace: "{{ .Values.argo.namespace }}" + mas_app_api_version: "{{ .Values.ibm_suite_app_predict_install.mas_app_api_version }}" + mas_app_kind: "{{ .Values.ibm_suite_app_predict_install.mas_app_kind }}" + mas_app_namespace: "{{ .Values.ibm_suite_app_predict_install.mas_app_namespace }}" + ibm_entitlement_key: "{{ .Values.ibm_suite_app_predict_install.ibm_entitlement_key }}" + mas_app_id: "{{ .Values.ibm_suite_app_predict_install.mas_app_id }}" + mas_app_catalog_source: "{{ .Values.ibm_suite_app_predict_install.mas_app_catalog_source }}" + mas_app_channel: "{{ .Values.ibm_suite_app_predict_install.mas_app_channel }}" + mas_edition: "{{ .Values.ibm_suite_app_predict_install.mas_edition }}" + run_sync_hooks: {{ .Values.ibm_suite_app_predict_install.run_sync_hooks }} + mas_manual_cert_mgmt: "{{ .Values.ibm_suite_app_predict_install.mas_manual_cert_mgmt }}" + {{- if eq .Values.ibm_suite_app_predict_install.mas_manual_cert_mgmt true }} + public_tls_secret_name: "{{ .Values.ibm_suite_app_predict_install.public_tls_secret_name }}" + ca_cert: | + {{ .Values.ibm_suite_app_predict_install.ca_cert }} + tls_cert: | + {{ .Values.ibm_suite_app_predict_install.tls_cert }} + tls_key: | + {{ .Values.ibm_suite_app_predict_install.tls_key }} + {{- end }} + mas_app_spec: {{ .Values.ibm_suite_app_predict_install.mas_app_spec | toYaml | nindent 14 }} + {{- if .Values.custom_labels }} + custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} + {{- end }} + - name: ARGOCD_APP_NAME + value: "predict-install" + {{- if not (empty .Values.avp.secret) }} + - name: 
AVP_SECRET + value: {{ .Values.avp.secret }} + {{- end }} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=false + - RespectIgnoreDifferences=true +{{- end }} diff --git a/root-applications/ibm-mas-instance-root/values.yaml b/root-applications/ibm-mas-instance-root/values.yaml new file mode 100644 index 000000000..c603ebd3b --- /dev/null +++ b/root-applications/ibm-mas-instance-root/values.yaml @@ -0,0 +1,25 @@ +--- + +avp: + name: "argocd-vault-plugin-helm" + secret: "" + values_varname: "HELM_VALUES" + +source: + repo_url: "https://github.com/ibm-mas/gitops" + revision: "poc" + +# Customers will definitely need to customise this (our gitops-envs/mas-dev repos are private), +# So deliberately not specifying defaults here; we want charts to refuse to render if these are not specified +# Both of these correspond to requirement arguments of the gitops-bootstrap CLI function (--github-url and --github-revision) +# generator: +# repo_url: "" +# revision: "" + +# These defaults align with the ArgoCD worker setup by gitops-bootstrap +# (openshift-gitops with a single ArgoCD project "mas") +argo: + namespace: "openshift-gitops" + projects: + rootapps: "mas" + apps: "mas"