From b7d254b6c0b2b8227c98716423e5fb2b75a87de5 Mon Sep 17 00:00:00 2001
From: Paul Schultz
Date: Wed, 4 Dec 2024 09:11:09 -0600
Subject: [PATCH 1/6] chore: remove deprecated rotate file (#2056)

Signed-off-by: Paul Schultz
---
 docs/audit-log.md                           | 163 --------------------
 packages/backend/package.json               |   3 +-
 packages/backend/src/index.ts               |   6 +-
 packages/backend/src/logger/customLogger.ts |  24 ---
 yarn.lock                                   |  24 ---
 5 files changed, 2 insertions(+), 218 deletions(-)

diff --git a/docs/audit-log.md b/docs/audit-log.md
index f6217d14fb..0bf7b25c54 100644
--- a/docs/audit-log.md
+++ b/docs/audit-log.md
@@ -13,166 +13,3 @@ auditLog:
   console:
     enabled: false
 ```
-
-### Logging to a Rotating File
-
-#### Enabling Rotating File Logging
-
-To enable audit logging to a rotating file, set the following in your configuration (this feature is disabled by default):
-
-```yaml
-auditLog:
-  rotateFile:
-    enabled: true
-```
-
-With this configuration, the default behavior is:
-
-- Rotate logs at midnight (local system timezone)
-- Log file format: `redhat-developer-hub-audit-%DATE%.log`
-- Log files stored in `/var/log/redhat-developer-hub/audit`
-- No automatic log file deletion
-- No gzip compression of archived logs
-- No file size limit
-
-#### Customizing Log File Location and Name
-
-By default, the audit logs are written in the `/var/log/redhat-developer-hub/audit` directory. To change the directory where log files are stored, specify a custom path (an absolute path is recommended):
-
-```yaml
-auditLog:
-  rotateFile:
-    logFileDirPath: /custom-path
-```
-
----
-
-**NOTE**
-
-The specified directory will be created automatically if it does not exist.
-
----
-
-By default, the audit log files will be in the following format: `redhat-developer-hub-audit-%DATE%.log`, where `%DATE%` is the format specified in [`auditLog.rotateFile.dateFormat`](#configuring-file-rotation-frequency).
-
-To customize the log file name format, use:
-
-```yaml
-auditLog:
-  rotateFile:
-    logFileName: custom-audit-log-%DATE%.log
-```
-
-#### Configuring File Rotation Frequency
-
-The default file rotation occurs daily at 00:00 local time. You can adjust the rotation frequency with the following configurations:
-
-```yaml
-auditLog:
-  rotateFile:
-    frequency: '12h' # Default: `custom`
-    dateFormat: 'YYYY-MM-DD' # Default: `YYYY-MM-DD`
-    utc: false # Default: `false`
-    maxSize: 100m # Default: undefined
-```
-
-`frequency` options include:
-
-- `daily`: Rotate daily at 00:00 local time
-- `Xm`: Rotate every X minutes (where X is a number between 0 and 59)
-- `Xh`: Rotate every X hours (where X is a number between 0 and 23)
-- `test`: Rotate every 1 minute
-- `custom`: Use `dateFormat` to set the rotation frequency (default if frequency is not specified)
-
----
-
-**NOTE**
-If `frequency` is set to `Xh`, `Xm`, or `test`, the `dateFormat` setting must be configured in a format that includes the specified time component. Otherwise, the rotation will not work as expected.
-
-For example, use `dateFormat: 'YYYY-MM-DD-HH'` for hourly rotation and `dateFormat: 'YYYY-MM-DD-HH-mm'` for per-minute rotation.
-
----
-
-Examples:
-
-```yaml
-auditLog:
-  rotateFile:
-    # If you want to rotate the file every 17 minutes
-    dateFormat: 'YYYY-MM-DD-HH-mm'
-    frequency: '17m'
-```
-
-The `dateFormat` setting configures both the `%DATE%` in `logFileName` and the file rotation frequency if `frequency` is set to `custom`. The default format is `YYYY-MM-DD`, meaning daily rotation. Supported values are based on [Moment.js formats](https://momentjs.com/docs/#/displaying/format/).
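-
-As an illustration (a hypothetical configuration combining the options described above, not taken from a real deployment), the following would rotate hourly in UTC and produce files such as `redhat-developer-hub-audit-2024-07-22-13.log`:
-
-```yaml
-auditLog:
-  rotateFile:
-    enabled: true
-    frequency: '1h'             # hourly rotation; dateFormat must include an hour component
-    dateFormat: 'YYYY-MM-DD-HH' # expands %DATE% in the log file name, e.g. 2024-07-22-13
-    utc: true                   # interpret dateFormat in UTC instead of local time
-```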
-
-If `frequency` is set to `custom`, then rotations will take place when the date string, represented in the specified `dateFormat`, changes.
-
-Examples:
-
-```yaml
-auditLog:
-  rotateFile:
-    # If you want rotations to occur every week and at the start of each month. Example `%DATE%` = '2025-Jul-Week 30'
-    dateFormat: 'YYYY-MMM-[Week] ww'
-```
-
-```yaml
-auditLog:
-  rotateFile:
-    # If you want to rotate the file at noon and midnight
-    dateFormat: 'YYYY-MM-DD-A'
-```
-
-To use UTC time for `dateFormat` instead of local time:
-
-```yaml
-auditLog:
-  rotateFile:
-    utc: true # Default: false
-```
-
-You can also set a maximum log file size before rotation. When the size limit is reached, a count suffix is appended to the filename, for example: `redhat-developer-hub-audit-2024-07-22.log.3`.
-
-To configure `maxSize`, provide a number followed by one of `k`, `m`, or `g` to specify the file size in kilobytes, megabytes, or gigabytes. No `maxSize` is configured by default.
-
-```yaml
-auditLog:
-  rotateFile:
-    maxSize: 100m # Sets a max file size limit of 100MB for audit log
-```
-
-#### Configuring File Retention Policy
-
-By default, log files are not deleted or archived. You can configure the maximum number of files to keep:
-
-```yaml
-auditLog:
-  rotateFile:
-    maxFilesOrDays: 14 # Deletes the oldest log when there are more than 14 log files
-```
-
-Or, configure the maximum number of days to retain logs by appending `d` to the number:
-
-```yaml
-auditLog:
-  rotateFile:
-    maxFilesOrDays: 5d # Deletes logs older than 5 days
-```
-
----
-
-**NOTE**
-
-If log deletion is enabled, a `.-audit.json` file will be generated in the log directory to track the generated log files. Any log file not contained in it will not be subject to automatic deletion.
-
-Currently, a new `.-audit.json` file is generated every time the backend is started. This means old audit logs will no longer be tracked or deleted, except for any log files reused by the current backend.
- ---- - -To archive and compress rotated logs using gzip: - -```yaml -auditLog: - rotateFile: - zippedArchive: true # Default: false -``` diff --git a/packages/backend/package.json b/packages/backend/package.json index 20f3130fc2..c828e46d69 100644 --- a/packages/backend/package.json +++ b/packages/backend/package.json @@ -60,8 +60,7 @@ "app": "*", "global-agent": "3.0.0", "undici": "6.19.8", - "winston": "3.14.2", - "winston-daily-rotate-file": "5.0.0" + "winston": "3.14.2" }, "devDependencies": { "@backstage/cli": "0.28.2", diff --git a/packages/backend/src/index.ts b/packages/backend/src/index.ts index b1e6ec8e0e..7aa11d64c3 100644 --- a/packages/backend/src/index.ts +++ b/packages/backend/src/index.ts @@ -45,11 +45,7 @@ backend.add( logger: config => { const auditLogConfig = config?.getOptionalConfig('auditLog'); return { - transports: [ - ...transports.log, - ...transports.auditLog(auditLogConfig), - ...transports.auditLogFile(auditLogConfig), - ], + transports: [...transports.log, ...transports.auditLog(auditLogConfig)], }; }, }), diff --git a/packages/backend/src/logger/customLogger.ts b/packages/backend/src/logger/customLogger.ts index fe1687ffda..31365fee6d 100644 --- a/packages/backend/src/logger/customLogger.ts +++ b/packages/backend/src/logger/customLogger.ts @@ -3,8 +3,6 @@ import type { Config } from '@backstage/config'; import * as winston from 'winston'; -import 'winston-daily-rotate-file'; - const defaultFormat = winston.format.combine( winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss', @@ -51,28 +49,6 @@ export const transports = { }), ]; }, - auditLogFile: (config?: Config) => { - if (!config?.getOptionalBoolean('rotateFile.enabled')) { - return []; - } - return [ - new winston.transports.DailyRotateFile({ - format: auditLogWinstonFormat, - dirname: - config?.getOptionalString('rotateFile.logFileDirPath') ?? - '/var/log/redhat-developer-hub/audit', - filename: - config?.getOptionalString('rotateFile.logFileName') ?? 
- 'redhat-developer-hub-audit-%DATE%.log', - datePattern: config?.getOptionalString('rotateFile.dateFormat'), - frequency: config?.getOptionalString('rotateFile.frequency'), - zippedArchive: config?.getOptionalBoolean('rotateFile.zippedArchive'), - utc: config?.getOptionalBoolean('rotateFile.utc'), - maxSize: config?.getOptionalString('rotateFile.maxSize'), - maxFiles: config?.getOptional('rotateFile.maxFilesOrDays'), - }), - ]; - }, }; export const createStaticLogger = ({ service }: { service: string }) => { diff --git a/yarn.lock b/yarn.lock index 85c1f237b1..fba5163d35 100644 --- a/yarn.lock +++ b/yarn.lock @@ -22404,7 +22404,6 @@ __metadata: prettier: 3.4.1 undici: 6.19.8 winston: 3.14.2 - winston-daily-rotate-file: 5.0.0 languageName: unknown linkType: soft @@ -28614,15 +28613,6 @@ __metadata: languageName: node linkType: hard -"file-stream-rotator@npm:^0.6.1": - version: 0.6.1 - resolution: "file-stream-rotator@npm:0.6.1" - dependencies: - moment: ^2.29.1 - checksum: ebdf6a9e7ca886a50f4dafb2284d4569cefd5bdf4e4451ead25f4d68b7f9776b2620a3d110d534edd40935d1e17f37d818e2129303201870ff89c71b19b49ac1 - languageName: node - linkType: hard - "file-type@npm:^16.5.4": version: 16.5.4 resolution: "file-type@npm:16.5.4" @@ -45636,20 +45626,6 @@ __metadata: languageName: node linkType: hard -"winston-daily-rotate-file@npm:5.0.0": - version: 5.0.0 - resolution: "winston-daily-rotate-file@npm:5.0.0" - dependencies: - file-stream-rotator: ^0.6.1 - object-hash: ^3.0.0 - triple-beam: ^1.4.1 - winston-transport: ^4.7.0 - peerDependencies: - winston: ^3 - checksum: 45d0a1c1d1a178a22a6f92f4248139e0889720947d5afa657314826d6ea48e7dceae37521e2de2ed3a121993c4ae4ddcb0b510613c489a68d2eed689a304bef5 - languageName: node - linkType: hard - "winston-transport@npm:^4.5.0, winston-transport@npm:^4.7.0": version: 4.7.0 resolution: "winston-transport@npm:4.7.0" From 4f7d6c4a9fa7b72515c4a1480ce7cff86e7cfddc Mon Sep 17 00:00:00 2001 From: Kashish Mittal <113269381+04kash@users.noreply.github.com> Date: Thu, 5 Dec 2024 12:02:52 -0500 Subject: [PATCH 2/6] chore(e2e-tests): enable tech radar plugin tests (#1963) * chore(e2e-tests): Enable tech radar plugin tests Signed-off-by: Kashish Mittal * update e2e-tests config Signed-off-by: Kashish Mittal * update tech radar e2e tests Signed-off-by: Kashish Mittal * update tech radar e2e tests Signed-off-by: Kashish Mittal * use UI_HELPER_ELEMENTS instead of UIhelperPO in tech radar tests Signed-off-by: Kashish Mittal * update e2e test config to include techRadar Signed-off-by: Kashish Mittal * update secrets-rhdh-secrets.yaml Signed-off-by: Kashish Mittal * add host for service rendering tech radar data Signed-off-by: Kashish Mittal * add url for service rendering tech radar data Signed-off-by: Kashish Mittal * update tech radar related config Signed-off-by: Kashish Mittal * make tech radar tests PR friendly * encode DH_TARGET_URL before converting adding it to secrets * always set DH_TARGET_URL not just in PR tests --------- Signed-off-by: Kashish Mittal Co-authored-by: Omar Al Jaljuli --- .ibm/pipelines/auth/secrets-rhdh-secrets.yaml | 2 +- .ibm/pipelines/env_variables.sh | 2 +- .ibm/pipelines/openshift-ci-tests.sh | 4 +++- .../resources/config_map/app-config-rhdh.yaml | 12 ++++++------ .ibm/pipelines/value_files/values_showcase.yaml | 4 ++-- .../dynamic-plugins-info.spec.ts | 6 +++--- .../e2e/plugins/quick-access-and-tech-radar.spec.ts | 4 ++-- 7 files changed, 18 insertions(+), 16 deletions(-) diff --git a/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml 
b/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml index 0ec98ab1ed..51d9e76640 100644 --- a/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml +++ b/.ibm/pipelines/auth/secrets-rhdh-secrets.yaml @@ -29,7 +29,7 @@ data: KEYCLOAK_CLIENT_ID: bXljbGllbnQ= KEYCLOAK_CLIENT_SECRET: dGVtcA== ACR_SECRET: dGVtcA== - DH_TARGET_URL: aHR0cDovL3Rlc3QtYmFja3N0YWdlLWN1c3RvbWl6YXRpb24tcHJvdmlkZXItc2hvd2Nhc2UtY2kucmhkaC1wci1vcy1hOTgwNTY1MDgzMGIyMmMzYWVlMjQzZTUxZDc5NTY1ZC0wMDAwLnVzLWVhc3QuY29udGFpbmVycy5hcHBkb21haW4uY2xvdWQ= + DH_TARGET_URL: dGVzdC1iYWNrc3RhZ2UtY3VzdG9taXphdGlvbi1wcm92aWRlci1zaG93Y2FzZS1jaS5yaGRoLXByLW9zLWE5ODA1NjUwODMwYjIyYzNhZWUyNDNlNTFkNzk1NjVkLTAwMDAudXMtZWFzdC5jb250YWluZXJzLmFwcGRvbWFpbi5jbG91ZA== GOOGLE_CLIENT_ID: dGVtcA== GOOGLE_CLIENT_SECRET: dGVtcA== type: Opaque diff --git a/.ibm/pipelines/env_variables.sh b/.ibm/pipelines/env_variables.sh index 41346c3344..924321e036 100755 --- a/.ibm/pipelines/env_variables.sh +++ b/.ibm/pipelines/env_variables.sh @@ -67,7 +67,7 @@ KEYCLOAK_REALM='myrealm' KEYCLOAK_CLIENT_ID='myclient' KEYCLOAK_CLIENT_SECRET=$(cat /tmp/secrets/KEYCLOAK_CLIENT_SECRET) ACR_SECRET=$(cat /tmp/secrets/ACR_SECRET) -DH_TARGET_URL=aHR0cDovL3Rlc3QtYmFja3N0YWdlLWN1c3RvbWl6YXRpb24tcHJvdmlkZXItc2hvd2Nhc2UtY2kucmhkaC1wci1vcy1hOTgwNTY1MDgzMGIyMmMzYWVlMjQzZTUxZDc5NTY1ZC0wMDAwLnVzLWVhc3QuY29udGFpbmVycy5hcHBkb21haW4uY2xvdWQ= +DH_TARGET_URL=dGVzdC1iYWNrc3RhZ2UtY3VzdG9taXphdGlvbi1wcm92aWRlci1zaG93Y2FzZS1jaS5yaGRoLXByLW9zLWE5ODA1NjUwODMwYjIyYzNhZWUyNDNlNTFkNzk1NjVkLTAwMDAudXMtZWFzdC5jb250YWluZXJzLmFwcGRvbWFpbi5jbG91ZA== GOOGLE_CLIENT_ID=$(cat /tmp/secrets/GOOGLE_CLIENT_ID) GOOGLE_CLIENT_SECRET=$(cat /tmp/secrets/GOOGLE_CLIENT_SECRET) GOOGLE_ACC_COOKIE=$(cat /tmp/secrets/GOOGLE_ACC_COOKIE) diff --git a/.ibm/pipelines/openshift-ci-tests.sh b/.ibm/pipelines/openshift-ci-tests.sh index 84b14129d6..a49fb6b4ac 100755 --- a/.ibm/pipelines/openshift-ci-tests.sh +++ b/.ibm/pipelines/openshift-ci-tests.sh @@ -211,7 +211,9 @@ apply_yaml_files() { GITHUB_APP_CLIENT_SECRET=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_SECRET) fi - for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN; do + DH_TARGET_URL=$(echo -n "test-backstage-customization-provider-${project}.${K8S_CLUSTER_ROUTER_BASE}" | base64 -w 0) + + for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN DH_TARGET_URL; do sed -i "s|${key}:.*|${key}: ${!key}|g" "$dir/auth/secrets-rhdh-secrets.yaml" done diff --git a/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml b/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml index ca054ba60a..ad49b0850c 100644 --- a/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml +++ b/.ibm/pipelines/resources/config_map/app-config-rhdh.yaml @@ -53,6 +53,10 @@ dynamicPlugins: title: Dark Dynamic variant: dark backend: + reading: + allow: + - host: 'github.com' + - host: ${DH_TARGET_URL} auth: keys: - secret: temp @@ 
-101,17 +105,13 @@ auth: development: clientId: ${GOOGLE_CLIENT_ID} clientSecret: ${GOOGLE_CLIENT_SECRET} +techRadar: + url: "http://${DH_TARGET_URL}/tech-radar" proxy: skipInvalidProxies: true # endpoints: {} endpoints: # Other Proxies - # customize developer hub instance - '/developer-hub': - target: ${DH_TARGET_URL} - changeOrigin: true - # Change to "false" in case of using self hosted cluster with a self-signed certificate - secure: false '/acr/api': target: 'https://rhdhqetest.azurecr.io/acr/v1/' changeOrigin: true diff --git a/.ibm/pipelines/value_files/values_showcase.yaml b/.ibm/pipelines/value_files/values_showcase.yaml index cb5268bf28..d4ad325217 100644 --- a/.ibm/pipelines/value_files/values_showcase.yaml +++ b/.ibm/pipelines/value_files/values_showcase.yaml @@ -94,11 +94,11 @@ global: disabled: false - package: ./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import disabled: false - # Enable tech-radar plugin. + # Enable tech-radar plugins. - package: ./dynamic-plugins/dist/backstage-community-plugin-tech-radar disabled: false - package: ./dynamic-plugins/dist/backstage-community-plugin-tech-radar-backend-dynamic - disabled: true + disabled: false - package: ./dynamic-plugins/dist/backstage-community-plugin-acr disabled: false - package: ./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-http-request-dynamic diff --git a/e2e-tests/playwright/e2e/plugins/dynamic-plugins-info/dynamic-plugins-info.spec.ts b/e2e-tests/playwright/e2e/plugins/dynamic-plugins-info/dynamic-plugins-info.spec.ts index a2506b4786..ffcff037a5 100644 --- a/e2e-tests/playwright/e2e/plugins/dynamic-plugins-info/dynamic-plugins-info.spec.ts +++ b/e2e-tests/playwright/e2e/plugins/dynamic-plugins-info/dynamic-plugins-info.spec.ts @@ -38,14 +38,14 @@ test.describe("dynamic-plugins-info UI tests", () => { await uiHelper.verifyRowsInTable(["backstage-plugin-techdocs"], true); }); - test.skip("it should have a backstage-plugin-tech-radar plugin which is Enabled and Preinstalled", async ({ + test("it should have a plugin-tech-radar plugin which is Enabled and Preinstalled", async ({ page, }) => { await page .getByPlaceholder("Filter") - .pressSequentially("backstage-plugin-tech-radar\n", { delay: 300 }); + .pressSequentially("plugin-tech-radar\n", { delay: 300 }); const row = await page.locator( - UI_HELPER_ELEMENTS.rowByText("backstage-plugin-tech-radar"), + UI_HELPER_ELEMENTS.rowByText("backstage-community-plugin-tech-radar"), ); expect(await row.locator("td").nth(2).innerText()).toBe("Yes"); // enabled expect(await row.locator("td").nth(3).innerText()).toBe("Yes"); // preinstalled diff --git a/e2e-tests/playwright/e2e/plugins/quick-access-and-tech-radar.spec.ts b/e2e-tests/playwright/e2e/plugins/quick-access-and-tech-radar.spec.ts index c553f37456..fa3de30189 100644 --- a/e2e-tests/playwright/e2e/plugins/quick-access-and-tech-radar.spec.ts +++ b/e2e-tests/playwright/e2e/plugins/quick-access-and-tech-radar.spec.ts @@ -4,9 +4,9 @@ import { Common } from "../../utils/common"; import { UIhelper } from "../../utils/ui-helper"; import { TechRadar } from "../../support/pages/tech-radar"; -// Pre-req: Enable backstage-plugin-tech-radar and backstage-plugin-tech-radar-backend Plugin +// Pre-req: Enable plugin-tech-radar and plugin-tech-radar-backend Plugin -test.describe.skip("Test Customized Quick Access and tech-radar plugin", () => { +test.describe("Test Customized Quick Access and tech-radar plugin", () => { test.beforeEach(async ({ page }) => { const common = new Common(page); await 
common.loginAsGuest(); From d3c2916e2cfd49c7f2f33d6ca1a8f31a398aadf9 Mon Sep 17 00:00:00 2001 From: Bethany Griggs Date: Thu, 5 Dec 2024 19:20:25 +0000 Subject: [PATCH 3/6] chore: update location of @backstage-community/plugin-scaffolder-backend-module-annotator (#2060) * chore: update location of @backstage-community/plugin-scaffolder-backend-module-annotator Signed-off-by: Beth Griggs * fixup! yarn prettier:fix --------- Signed-off-by: Beth Griggs --- .../e2e/catalog-scaffolded-from-link.spec.ts | 2 +- packages/backend/package.json | 2 +- packages/backend/src/index.ts | 4 ++- yarn.lock | 28 +++++++++---------- 4 files changed, 19 insertions(+), 17 deletions(-) diff --git a/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts b/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts index 940ddb661f..754163c5f4 100644 --- a/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts +++ b/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts @@ -14,7 +14,7 @@ test.describe.skip("Link Scaffolded Templates to Catalog Items", () => { let catalogImport: CatalogImport; const template = - "https://github.com/janus-idp/backstage-plugins/blob/main/plugins/scaffolder-annotator-action/examples/templates/01-scaffolder-template.yaml"; + "https://github.com/backstage/community-plugins/blob/main/workspaces/scaffolder-backend-module-annotator/plugins/scaffolder-backend-module-annotator/examples/templates/01-scaffolder-template.yaml"; const reactAppDetails = { owner: "janus-qe/maintainers", diff --git a/packages/backend/package.json b/packages/backend/package.json index c828e46d69..9c28cf4762 100644 --- a/packages/backend/package.json +++ b/packages/backend/package.json @@ -22,6 +22,7 @@ "dependencies": { "@backstage-community/plugin-rbac-backend": "5.2.6", "@backstage-community/plugin-rbac-node": "1.8.2", + "@backstage-community/plugin-scaffolder-backend-module-annotator": "2.2.2", "@backstage/backend-app-api": "1.0.1", "@backstage/backend-defaults": "0.5.2", "@backstage/backend-dynamic-feature-service": "0.4.4", @@ -50,7 +51,6 @@ "@internal/plugin-licensed-users-info-backend": "*", "@internal/plugin-scalprum-backend": "*", "@janus-idp/backstage-plugin-audit-log-node": "1.7.0", - "@janus-idp/backstage-scaffolder-backend-module-annotator": "2.2.0", "@opentelemetry/auto-instrumentations-node": "0.50.2", "@opentelemetry/exporter-prometheus": "0.53.0", "@opentelemetry/host-metrics": "0.35.4", diff --git a/packages/backend/src/index.ts b/packages/backend/src/index.ts index 7aa11d64c3..812796b107 100644 --- a/packages/backend/src/index.ts +++ b/packages/backend/src/index.ts @@ -83,7 +83,9 @@ backend.add(import('@backstage/plugin-search-backend-module-catalog')); backend.add(import('@backstage/plugin-events-backend')); backend.add(import('@backstage-community/plugin-rbac-backend')); -backend.add(import('@janus-idp/backstage-scaffolder-backend-module-annotator')); +backend.add( + import('@backstage-community/plugin-scaffolder-backend-module-annotator'), +); backend.add(pluginIDProviderService); backend.add(rbacDynamicPluginsProvider); diff --git a/yarn.lock b/yarn.lock index fba5163d35..4733e21a29 100644 --- a/yarn.lock +++ b/yarn.lock @@ -3858,6 +3858,19 @@ __metadata: languageName: node linkType: hard +"@backstage-community/plugin-scaffolder-backend-module-annotator@npm:2.2.2": + version: 2.2.2 + resolution: "@backstage-community/plugin-scaffolder-backend-module-annotator@npm:2.2.2" + dependencies: + "@backstage/backend-plugin-api": ^1.0.1 + "@backstage/plugin-scaffolder-node": ^0.5.0 
+ fs-extra: ^11.2.0 + lodash: ^4.17.21 + yaml: ^2.0.0 + checksum: b963e291aa2a68cdae92d39f65483d58e90846f0d330687ffe1b82d2c985f2a79ec5aab521e81634d1f5ce8873f047587db4161264f3657e8859b9b89a6af0c7 + languageName: node + linkType: hard + "@backstage-community/plugin-scaffolder-backend-module-quay@npm:2.2.2": version: 2.2.2 resolution: "@backstage-community/plugin-scaffolder-backend-module-quay@npm:2.2.2" @@ -10954,19 +10967,6 @@ __metadata: languageName: node linkType: hard -"@janus-idp/backstage-scaffolder-backend-module-annotator@npm:2.2.0": - version: 2.2.0 - resolution: "@janus-idp/backstage-scaffolder-backend-module-annotator@npm:2.2.0" - dependencies: - "@backstage/backend-plugin-api": ^1.0.1 - "@backstage/plugin-scaffolder-node": ^0.5.0 - fs-extra: ^11.2.0 - lodash: ^4.17.21 - yaml: ^2.0.0 - checksum: 9156f5856b79479bff83ba61236a46b2a3506570330113faa2c746057d90943ec615aeebf4548dc509327b98ac052aa71df2837c12f93bde518923f07866b113 - languageName: node - linkType: hard - "@janus-idp/cli@npm:1.18.5": version: 1.18.5 resolution: "@janus-idp/cli@npm:1.18.5" @@ -22361,6 +22361,7 @@ __metadata: dependencies: "@backstage-community/plugin-rbac-backend": 5.2.6 "@backstage-community/plugin-rbac-node": 1.8.2 + "@backstage-community/plugin-scaffolder-backend-module-annotator": 2.2.2 "@backstage/backend-app-api": 1.0.1 "@backstage/backend-defaults": 0.5.2 "@backstage/backend-dynamic-feature-service": 0.4.4 @@ -22390,7 +22391,6 @@ __metadata: "@internal/plugin-licensed-users-info-backend": "*" "@internal/plugin-scalprum-backend": "*" "@janus-idp/backstage-plugin-audit-log-node": 1.7.0 - "@janus-idp/backstage-scaffolder-backend-module-annotator": 2.2.0 "@opentelemetry/auto-instrumentations-node": 0.50.2 "@opentelemetry/exporter-prometheus": 0.53.0 "@opentelemetry/host-metrics": 0.35.4 From ca5d117accdb8c0d6d13e2afd94eebb78c1c5667 Mon Sep 17 00:00:00 2001 From: Subhash Khileri Date: Fri, 6 Dec 2024 16:17:51 +0530 Subject: [PATCH 4/6] fix catalog scaffolded test (#2066) --- e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts b/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts index 754163c5f4..d9794e7e2f 100644 --- a/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts +++ b/e2e-tests/playwright/e2e/catalog-scaffolded-from-link.spec.ts @@ -8,13 +8,13 @@ import { GITHUB_API_ENDPOINTS } from "../utils/api-endpoints"; let page: Page; // TODO: replace skip with serial -test.describe.skip("Link Scaffolded Templates to Catalog Items", () => { +test.describe.serial("Link Scaffolded Templates to Catalog Items", () => { let uiHelper: UIhelper; let common: Common; let catalogImport: CatalogImport; const template = - "https://github.com/backstage/community-plugins/blob/main/workspaces/scaffolder-backend-module-annotator/plugins/scaffolder-backend-module-annotator/examples/templates/01-scaffolder-template.yaml"; + "https://github.com/janus-qe/01-scaffolder-template/blob/main/01-scaffolder-template.yaml"; const reactAppDetails = { owner: "janus-qe/maintainers", From 8d80488322a8433977017fe0ce854f451515bae8 Mon Sep 17 00:00:00 2001 From: Gustavo Lira e Silva Date: Fri, 6 Dec 2024 14:13:13 -0300 Subject: [PATCH 5/6] refactor: modularize OpenShift CI script for better maintainability - new (#2032) * modularize OpenShift CI script for better maintainability * Add log copying for auth providers in utils.sh script This commit introduces copying of logs from 
`auth-providers-logs` directory to the artifact directory. It ensures that logs for authentication providers are now included in the pipeline artifacts, aiding in troubleshooting and analysis. Additionally, the indentation for the screenshots copying block is corrected for consistency. --- .ibm/pipelines/cluster/aks/deployment.sh | 4 +- .ibm/pipelines/cluster/gke/deployment.sh | 4 +- .ibm/pipelines/jobs/aks.sh | 30 + .ibm/pipelines/jobs/gke.sh | 24 + .ibm/pipelines/jobs/main.sh | 42 ++ .ibm/pipelines/jobs/ocp-v4-15.sh | 21 + .ibm/pipelines/jobs/ocp-v4-16.sh | 21 + .ibm/pipelines/jobs/operator.sh | 12 + .ibm/pipelines/jobs/periodic.sh | 40 ++ .ibm/pipelines/openshift-ci-tests.sh | 545 ++---------------- .ibm/pipelines/utils.sh | 424 +++++++++++++- e2e-tests/playwright.config.ts | 1 + .../e2e/plugins/topology/topology.spec.ts | 3 +- 13 files changed, 669 insertions(+), 502 deletions(-) create mode 100644 .ibm/pipelines/jobs/aks.sh create mode 100644 .ibm/pipelines/jobs/gke.sh create mode 100644 .ibm/pipelines/jobs/main.sh create mode 100644 .ibm/pipelines/jobs/ocp-v4-15.sh create mode 100644 .ibm/pipelines/jobs/ocp-v4-16.sh create mode 100644 .ibm/pipelines/jobs/operator.sh create mode 100644 .ibm/pipelines/jobs/periodic.sh diff --git a/.ibm/pipelines/cluster/aks/deployment.sh b/.ibm/pipelines/cluster/aks/deployment.sh index 459a04c341..245ebe1106 100644 --- a/.ibm/pipelines/cluster/aks/deployment.sh +++ b/.ibm/pipelines/cluster/aks/deployment.sh @@ -1,5 +1,4 @@ initiate_aks_deployment() { - install_helm add_helm_repos delete_namespace "${NAME_SPACE_RBAC_K8S}" configure_namespace "${NAME_SPACE_K8S}" @@ -20,7 +19,6 @@ initiate_aks_deployment() { } initiate_rbac_aks_deployment() { - install_helm add_helm_repos delete_namespace "${NAME_SPACE_K8S}" configure_namespace "${NAME_SPACE_RBAC_K8S}" @@ -38,4 +36,4 @@ initiate_rbac_aks_deployment() { --set global.host="${K8S_CLUSTER_ROUTER_BASE}" \ --set upstream.backstage.image.repository="${QUAY_REPO}" \ --set upstream.backstage.image.tag="${TAG_NAME}" -} \ No newline at end of file +} diff --git a/.ibm/pipelines/cluster/gke/deployment.sh b/.ibm/pipelines/cluster/gke/deployment.sh index 813fefd67c..99c910fd81 100644 --- a/.ibm/pipelines/cluster/gke/deployment.sh +++ b/.ibm/pipelines/cluster/gke/deployment.sh @@ -1,6 +1,5 @@ initiate_gke_deployment() { gcloud_ssl_cert_create $GKE_CERT_NAME $GKE_INSTANCE_DOMAIN_NAME $GOOGLE_CLOUD_PROJECT - install_helm add_helm_repos delete_namespace "${NAME_SPACE_RBAC_K8S}" configure_namespace "${NAME_SPACE_K8S}" @@ -24,7 +23,6 @@ initiate_gke_deployment() { initiate_rbac_gke_deployment() { gcloud_ssl_cert_create $GKE_CERT_NAME $GKE_INSTANCE_DOMAIN_NAME $GOOGLE_CLOUD_PROJECT - install_helm add_helm_repos delete_namespace "${NAME_SPACE_K8S}" configure_namespace "${NAME_SPACE_RBAC_K8S}" @@ -43,4 +41,4 @@ initiate_rbac_gke_deployment() { --set upstream.backstage.image.repository="${QUAY_REPO}" \ --set upstream.backstage.image.tag="${TAG_NAME}" \ --set upstream.ingress.annotations."ingress\.gcp\.kubernetes\.io/pre-shared-cert"="${GKE_CERT_NAME}" -} \ No newline at end of file +} diff --git a/.ibm/pipelines/jobs/aks.sh b/.ibm/pipelines/jobs/aks.sh new file mode 100644 index 0000000000..ddf4d33c56 --- /dev/null +++ b/.ibm/pipelines/jobs/aks.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +handle_aks() { + echo "Starting AKS deployment" + for file in ${DIR}/cluster/aks/*.sh; do source $file; done + + export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_URL) + export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_TOKEN) + export 
K8S_CLUSTER_ROUTER_BASE=$AKS_INSTANCE_DOMAIN_NAME
+  export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
+  export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
+
+  url="https://${K8S_CLUSTER_ROUTER_BASE}"
+
+  az_login
+  az_aks_start "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+  az_aks_approuting_enable "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+  az_aks_get_credentials "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+
+  set_github_app_3_credentials
+
+  initiate_aks_deployment
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_K8S}"
+  initiate_rbac_aks_deployment
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_RBAC_K8S}"
+}
+
+
diff --git a/.ibm/pipelines/jobs/gke.sh b/.ibm/pipelines/jobs/gke.sh
new file mode 100644
index 0000000000..71e8de697c
--- /dev/null
+++ b/.ibm/pipelines/jobs/gke.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+handle_gke() {
+  echo "Starting GKE deployment"
+  for file in ${DIR}/cluster/gke/*.sh; do source $file; done
+
+  export K8S_CLUSTER_ROUTER_BASE=$GKE_INSTANCE_DOMAIN_NAME
+  export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
+  export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
+  url="https://${K8S_CLUSTER_ROUTER_BASE}"
+
+  gcloud_auth "${GKE_SERVICE_ACCOUNT_NAME}" "/tmp/secrets/GKE_SERVICE_ACCOUNT_KEY"
+  gcloud_gke_get_credentials "${GKE_CLUSTER_NAME}" "${GKE_CLUSTER_REGION}" "${GOOGLE_CLOUD_PROJECT}"
+
+  set_github_app_3_credentials
+
+  initiate_gke_deployment
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_K8S}"
+  initiate_rbac_gke_deployment
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" "${url}"
+  delete_namespace "${NAME_SPACE_RBAC_K8S}"
+
+}
diff --git a/.ibm/pipelines/jobs/main.sh b/.ibm/pipelines/jobs/main.sh
new file mode 100644
index 0000000000..027252e91d
--- /dev/null
+++ b/.ibm/pipelines/jobs/main.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+set -x
+
+set_namespace() {
+  # Enable parallel PR testing for main branch by utilizing a pool of namespaces
+  local namespaces_pool=("pr-1" "pr-2" "pr-3")
+  local namespace_found=false
+  # Iterate through namespace pool to find an available set
+  for ns in "${namespaces_pool[@]}"; do
+    if ! oc get namespace "showcase-$ns" >/dev/null 2>&1; then
+      echo "Namespace showcase-$ns does not exist. Using NS: showcase-$ns, showcase-rbac-$ns, postgress-external-db-$ns"
+      export NAME_SPACE="showcase-$ns"
+      export NAME_SPACE_RBAC="showcase-rbac-$ns"
+      export NAME_SPACE_POSTGRES_DB="postgress-external-db-$ns"
+      namespace_found=true
+      break
+    fi
+  done
+  if ! $namespace_found; then
+    echo "Error: All namespaces ${namespaces_pool[*]} are already in use"
+    exit 1
+  fi
+}
+
+handle_main() {
+  set_github_app_4_credentials
+  set_namespace
+  echo "Configuring namespace: ${NAME_SPACE}"
+  oc_login
+
+  API_SERVER_URL=$(oc whoami --show-server)
+  ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64)
+  ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64)
+
+  export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//')
+  local url="https://${RELEASE_NAME}-backstage-${NAME_SPACE}.${K8S_CLUSTER_ROUTER_BASE}"
+  initiate_deployments
+  deploy_test_backstage_provider "${NAME_SPACE}"
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}"
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}"
+}
diff --git a/.ibm/pipelines/jobs/ocp-v4-15.sh b/.ibm/pipelines/jobs/ocp-v4-15.sh
new file mode 100644
index 0000000000..7bc8d941c1
--- /dev/null
+++ b/.ibm/pipelines/jobs/ocp-v4-15.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+handle_ocp_v4_15() {
+  K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_URL)
+  K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_TOKEN)
+
+  oc_login
+
+  API_SERVER_URL=$(oc whoami --show-server)
+  ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64)
+  ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64)
+
+  export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//')
+  apply_yaml_files "${DIR}" "${NAME_SPACE}"
+  deploy_test_backstage_provider "${NAME_SPACE}"
+  local url="https://${RELEASE_NAME}-backstage-${NAME_SPACE}.${K8S_CLUSTER_ROUTER_BASE}"
+
+  initiate_deployments
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}"
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}"
+}
diff --git a/.ibm/pipelines/jobs/ocp-v4-16.sh b/.ibm/pipelines/jobs/ocp-v4-16.sh
new file mode 100644
index 0000000000..c8c8f8ed4f
--- /dev/null
+++ b/.ibm/pipelines/jobs/ocp-v4-16.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+handle_ocp_v4_16() {
+  K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_URL)
+  K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_TOKEN)
+
+  oc_login
+
+  API_SERVER_URL=$(oc whoami --show-server)
+  ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64)
+  ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64)
+
+  export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//')
+  apply_yaml_files "${DIR}" "${NAME_SPACE}"
+  deploy_test_backstage_provider "${NAME_SPACE}"
+  local url="https://${RELEASE_NAME}-backstage-${NAME_SPACE}.${K8S_CLUSTER_ROUTER_BASE}"
+
+  initiate_deployments
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}"
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}"
+}
diff --git a/.ibm/pipelines/jobs/operator.sh b/.ibm/pipelines/jobs/operator.sh
new file mode 100644
index 0000000000..58b70c0442
--- /dev/null
+++ b/.ibm/pipelines/jobs/operator.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+handle_operator() {
+  oc_login
+
+  API_SERVER_URL=$(oc whoami --show-server)
+  ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64)
+  ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64)
+
+  apply_yaml_files "${DIR}" "${NAME_SPACE}"
+  deploy_test_backstage_provider "${NAME_SPACE}"
+}
diff --git a/.ibm/pipelines/jobs/periodic.sh b/.ibm/pipelines/jobs/periodic.sh
new file mode 100644
index 0000000000..921eb7fb20
--- /dev/null
+++ b/.ibm/pipelines/jobs/periodic.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+handle_nightly() {
+  export NAME_SPACE="showcase-ci-nightly"
+  export NAME_SPACE_RBAC="showcase-rbac-nightly"
+  export NAME_SPACE_POSTGRES_DB="postgress-external-db-nightly"
+  export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
+  export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
+
+  oc_login
+
+  API_SERVER_URL=$(oc whoami --show-server)
+  ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64)
+  ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64)
+
+  export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//')
+
+  configure_namespace "${NAME_SPACE}"
+  deploy_test_backstage_provider "${NAME_SPACE}"
+  local url="https://${RELEASE_NAME}-backstage-${NAME_SPACE}.${K8S_CLUSTER_ROUTER_BASE}"
+  install_pipelines_operator
+  sleep 20 # wait for Pipeline Operator/Tekton pipelines to be ready
+  oc apply -f "$DIR/resources/pipeline-run/hello-world-pipeline.yaml"
+  oc apply -f "$DIR/resources/pipeline-run/hello-world-pipeline-run.yaml"
+  initiate_deployments
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" "${url}"
+  check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${url}"
+
+  # Only test TLS config with RDS and Change configuration at runtime in nightly jobs
+  initiate_rds_deployment "${RELEASE_NAME}" "${NAME_SPACE_RDS}"
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RDS}" "${url}"
+
+  # Deploy `showcase-runtime` to run tests that require configuration changes at runtime
+  configure_namespace "${NAME_SPACE_RUNTIME}"
+  uninstall_helmchart "${NAME_SPACE_RUNTIME}" "${RELEASE_NAME}"
+  oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE_RUNTIME}"
+  apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}"
+  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RUNTIME}" "${url}"
+}
diff --git a/.ibm/pipelines/openshift-ci-tests.sh b/.ibm/pipelines/openshift-ci-tests.sh
index a49fb6b4ac..ca3dfcd8e3 100755
--- a/.ibm/pipelines/openshift-ci-tests.sh
+++ b/.ibm/pipelines/openshift-ci-tests.sh
@@ -4,7 +4,7 @@ set -xe
 export PS4='[$(date "+%Y-%m-%d %H:%M:%S")] ' # logs timestamp for every cmd.
LOGFILE="test-log" -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +export DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" secret_name="rhdh-k8s-plugin-secret" OVERALL_RESULT=0 @@ -23,498 +23,67 @@ cleanup() { trap cleanup EXIT INT ERR -source "${DIR}/utils.sh" -if [[ "$JOB_NAME" == *aks* ]]; then - for file in ${DIR}/cluster/aks/*.sh; do source $file; done -elif [[ "$JOB_NAME" == *gke* ]]; then - for file in ${DIR}/cluster/gke/*.sh; do source $file; done -fi - -set_cluster_info() { - export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL) - export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN) - - if [[ "$JOB_NAME" == *ocp-v4-16 ]]; then - K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_URL) - K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_TOKEN) - elif [[ "$JOB_NAME" == *ocp-v4-15 ]]; then - K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_URL) - K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_TOKEN) - elif [[ "$JOB_NAME" == *aks* ]]; then - K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_URL) - K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_TOKEN) - fi -} - -set_namespace() { - if [[ "$JOB_NAME" == *periodic-* ]]; then - NAME_SPACE="showcase-ci-nightly" - NAME_SPACE_RBAC="showcase-rbac-nightly" - NAME_SPACE_POSTGRES_DB="postgress-external-db-nightly" - NAME_SPACE_K8S="showcase-k8s-ci-nightly" - NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly" - elif [[ "$JOB_NAME" == *pull-*-main-e2e-tests* ]]; then - # Enable parallel PR testing for main branch by utilizing a pool of namespaces - local namespaces_pool=("pr-1" "pr-2" "pr-3") - local namespace_found=false - # Iterate through namespace pool to find an available set - for ns in "${namespaces_pool[@]}"; do - if ! oc get namespace "showcase-$ns" >/dev/null 2>&1; then - echo "Namespace "showcase-$ns" does not exist, Using NS: showcase-$ns, showcase-rbac-$ns, postgress-external-db-$ns" - NAME_SPACE="showcase-$ns" - NAME_SPACE_RBAC="showcase-rbac-$ns" - NAME_SPACE_POSTGRES_DB="postgress-external-db-$ns" - namespace_found=true - break - fi - done - if ! $namespace_found; then - echo "Error: All namespaces $namespaces_pool already in Use" - exit 1 - fi - fi -} - -add_helm_repos() { - helm version - - local repos=( - "bitnami=https://charts.bitnami.com/bitnami" - "backstage=https://backstage.github.io/charts" - "${HELM_REPO_NAME}=${HELM_REPO_URL}" - ) - - for repo in "${repos[@]}"; do - local key="${repo%%=*}" - local value="${repo##*=}" - - if ! helm repo list | grep -q "^$key"; then - helm repo add "$key" "$value" - else - echo "Repository $key already exists - updating repository instead." - fi - done - - helm repo update -} - -install_oc() { - if command -v oc >/dev/null 2>&1; then - echo "oc is already installed." - else - curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz - tar -xf oc.tar.gz - mv oc /usr/local/bin/ - rm oc.tar.gz - echo "oc installed successfully." - fi -} - -install_helm() { - if command -v helm >/dev/null 2>&1; then - echo "Helm is already installed." - else - echo "Installing Helm 3 client" - mkdir ~/tmpbin && cd ~/tmpbin - curl -sL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash -f - export PATH=$(pwd):$PATH - echo "Helm client installed successfully." - fi -} - -uninstall_helmchart() { - local project=$1 - local release=$2 - if helm list -n "${project}" | grep -q "${release}"; then - echo "Chart already exists. Removing it before install." 
- helm uninstall "${release}" -n "${project}" - fi -} - -configure_namespace() { - local project=$1 - delete_namespace $project - oc create namespace "${project}" - oc config set-context --current --namespace="${project}" -} - -delete_namespace() { - local project=$1 - if oc get namespace "$project" >/dev/null 2>&1; then - echo "Namespace ${project} exists. Attempting to delete..." - - # Remove blocking finalizers - remove_finalizers_from_resources "$project" - - # Attempt to delete the namespace - oc delete namespace "$project" --grace-period=0 --force || true - - # Check if namespace is still stuck in 'Terminating' and force removal if necessary - if oc get namespace "$project" -o jsonpath='{.status.phase}' | grep -q 'Terminating'; then - echo "Namespace ${project} is stuck in Terminating. Forcing deletion..." - force_delete_namespace "$project" - fi - fi -} - -configure_external_postgres_db() { - local project=$1 - oc apply -f "${DIR}/resources/postgres-db/postgres.yaml" --namespace="${NAME_SPACE_POSTGRES_DB}" - sleep 5 - - oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.ca\.crt}' | base64 --decode > postgres-ca - oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.crt}' | base64 --decode > postgres-tls-crt - oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.key}' | base64 --decode > postgres-tsl-key - - oc create secret generic postgress-external-db-cluster-cert \ - --from-file=ca.crt=postgres-ca \ - --from-file=tls.crt=postgres-tls-crt \ - --from-file=tls.key=postgres-tsl-key \ - --dry-run=client -o yaml | oc apply -f - --namespace="${project}" - - POSTGRES_PASSWORD=$(oc get secret/postgress-external-db-pguser-janus-idp -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath={.data.password}) - sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" - POSTGRES_HOST=$(echo -n "postgress-external-db-primary.$NAME_SPACE_POSTGRES_DB.svc.cluster.local" | base64 | tr -d '\n') - sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: ${POSTGRES_HOST}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" - oc apply -f "${DIR}/resources/postgres-db/postgres-cred.yaml" --namespace="${project}" -} - -apply_yaml_files() { - local dir=$1 - local project=$2 - echo "Applying YAML files to namespace ${project}" - - oc config set-context --current --namespace="${project}" - - local files=( - "$dir/resources/service_account/service-account-rhdh.yaml" - "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml" - "$dir/resources/cluster_role/cluster-role-k8s.yaml" - "$dir/resources/cluster_role/cluster-role-ocm.yaml" - "$dir/auth/secrets-rhdh-secrets.yaml" - ) - - for file in "${files[@]}"; do - sed -i "s/namespace:.*/namespace: ${project}/g" "$file" - done - - if [[ "$JOB_NAME" == *aks* || "$JOB_NAME" == *gke* || "$JOB_NAME" == *operator* ]]; then - GITHUB_APP_APP_ID=$GITHUB_APP_3_APP_ID - GITHUB_APP_CLIENT_ID=$GITHUB_APP_3_CLIENT_ID - GITHUB_APP_PRIVATE_KEY=$GITHUB_APP_3_PRIVATE_KEY - GITHUB_APP_CLIENT_SECRET=$GITHUB_APP_3_CLIENT_SECRET - elif [[ "$JOB_NAME" == *pull-*-main-e2e-tests* ]]; then - # GITHUB_APP_4 for all pr's on main branch. 
- GITHUB_APP_APP_ID=$(cat /tmp/secrets/GITHUB_APP_4_APP_ID) - GITHUB_APP_CLIENT_ID=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_ID) - GITHUB_APP_PRIVATE_KEY=$(cat /tmp/secrets/GITHUB_APP_4_PRIVATE_KEY) - GITHUB_APP_CLIENT_SECRET=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_SECRET) - fi - - DH_TARGET_URL=$(echo -n "test-backstage-customization-provider-${project}.${K8S_CLUSTER_ROUTER_BASE}" | base64 -w 0) - - for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN DH_TARGET_URL; do - sed -i "s|${key}:.*|${key}: ${!key}|g" "$dir/auth/secrets-rhdh-secrets.yaml" - done - - oc apply -f "$dir/resources/service_account/service-account-rhdh.yaml" --namespace="${project}" - oc apply -f "$dir/auth/service-account-rhdh-secret.yaml" --namespace="${project}" - oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}" - if [[ "$JOB_NAME" != *aks* && "$JOB_NAME" != *gke* ]]; then - oc new-app https://github.com/janus-qe/test-backstage-customization-provider --namespace="${project}" - oc expose svc/test-backstage-customization-provider --namespace="${project}" - fi - oc apply -f "$dir/resources/cluster_role/cluster-role-k8s.yaml" --namespace="${project}" - oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml" --namespace="${project}" - oc apply -f "$dir/resources/cluster_role/cluster-role-ocm.yaml" --namespace="${project}" - oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-ocm.yaml" --namespace="${project}" - - if [[ "$JOB_NAME" != *aks* ]]; then # Skip for AKS, because of strange `sed: -e expression #1, char 136: unterminated `s' command` - sed -i "s/K8S_CLUSTER_API_SERVER_URL:.*/K8S_CLUSTER_API_SERVER_URL: ${ENCODED_API_SERVER_URL}/g" "$dir/auth/secrets-rhdh-secrets.yaml" - fi - sed -i "s/K8S_CLUSTER_NAME:.*/K8S_CLUSTER_NAME: ${ENCODED_CLUSTER_NAME}/g" "$dir/auth/secrets-rhdh-secrets.yaml" - - set +x - token=$(oc get secret "${secret_name}" -n "${project}" -o=jsonpath='{.data.token}') - sed -i "s/OCM_CLUSTER_TOKEN: .*/OCM_CLUSTER_TOKEN: ${token}/" "$dir/auth/secrets-rhdh-secrets.yaml" - set -x - - if [[ "${project}" == *rbac* ]]; then - oc create configmap app-config-rhdh --from-file="app-config-rhdh.yaml"="$dir/resources/config_map/app-config-rhdh-rbac.yaml" --namespace="${project}" --dry-run=client -o yaml | oc apply -f - - elif [[ "$JOB_NAME" == *aks* || "$JOB_NAME" == *gke* ]]; then - yq 'del(.backend.cache)' "$dir/resources/config_map/app-config-rhdh.yaml" \ - | kubectl create configmap app-config-rhdh --from-file="app-config-rhdh.yaml"="/dev/stdin" --namespace="${project}" --dry-run=client -o yaml \ - | kubectl apply -f - - else - oc create configmap app-config-rhdh --from-file="app-config-rhdh.yaml"="$dir/resources/config_map/app-config-rhdh.yaml" --namespace="${project}" --dry-run=client -o yaml | oc apply -f - - fi - oc create configmap rbac-policy --from-file="rbac-policy.csv"="$dir/resources/config_map/rbac-policy.csv" --namespace="${project}" --dry-run=client -o yaml | oc apply -f - - oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}" - - #sleep 20 # wait for Pipeline Operator/Tekton pipelines to be ready - # Renable when namespace termination issue is solved - # oc apply -f 
"$dir/resources/pipeline-run/hello-world-pipeline.yaml" - # oc apply -f "$dir/resources/pipeline-run/hello-world-pipeline-run.yaml" -} - -run_tests() { - local release_name=$1 - local project=$2 - project=${project%-pr-*} # Remove -pr- suffix if any set for main branchs pr's. - cd "${DIR}/../../e2e-tests" - yarn install - yarn playwright install chromium - - Xvfb :99 & - export DISPLAY=:99 - - ( - set -e - echo "Using PR container image: ${TAG_NAME}" - yarn "$project" - ) 2>&1 | tee "/tmp/${LOGFILE}" - - local RESULT=${PIPESTATUS[0]} +export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL) +export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN) - pkill Xvfb - - mkdir -p "${ARTIFACT_DIR}/${project}/test-results" - mkdir -p "${ARTIFACT_DIR}/${project}/attachments/screenshots" - cp -a /tmp/backstage-showcase/e2e-tests/test-results/* "${ARTIFACT_DIR}/${project}/test-results" - cp -a /tmp/backstage-showcase/e2e-tests/${JUNIT_RESULTS} "${ARTIFACT_DIR}/${project}/${JUNIT_RESULTS}" - - if [ -d "/tmp/backstage-showcase/e2e-tests/screenshots" ]; then - cp -a /tmp/backstage-showcase/e2e-tests/screenshots/* "${ARTIFACT_DIR}/${project}/attachments/screenshots/" - fi - - if [ -d "/tmp/backstage-showcase/e2e-tests/auth-providers-logs" ]; then - cp -a /tmp/backstage-showcase/e2e-tests/auth-providers-logs/* "${ARTIFACT_DIR}/${project}/" - fi - - ansi2html <"/tmp/${LOGFILE}" >"/tmp/${LOGFILE}.html" - cp -a "/tmp/${LOGFILE}.html" "${ARTIFACT_DIR}/${project}" - cp -a /tmp/backstage-showcase/e2e-tests/playwright-report/* "${ARTIFACT_DIR}/${project}" - - droute_send "${release_name}" "${project}" - - echo "${project} RESULT: ${RESULT}" - if [ "${RESULT}" -ne 0 ]; then - OVERALL_RESULT=1 - fi -} - -check_backstage_running() { - local release_name=$1 - local namespace=$2 - if [[ "$JOB_NAME" == *aks* || "$JOB_NAME" == *gke* ]]; then - local url="https://${K8S_CLUSTER_ROUTER_BASE}" - else - local url="https://${release_name}-backstage-${namespace}.${K8S_CLUSTER_ROUTER_BASE}" - fi - - local max_attempts=30 - local wait_seconds=30 - - echo "Checking if Backstage is up and running at ${url}" - - for ((i = 1; i <= max_attempts; i++)); do - local http_status - http_status=$(curl --insecure -I -s -o /dev/null -w "%{http_code}" "${url}") - - if [ "${http_status}" -eq 200 ]; then - echo "Backstage is up and running!" - export BASE_URL="${url}" - echo "######## BASE URL ########" - echo "${BASE_URL}" - return 0 - else - echo "Attempt ${i} of ${max_attempts}: Backstage not yet available (HTTP Status: ${http_status})" - sleep "${wait_seconds}" - fi - done - - echo "Failed to reach Backstage at ${BASE_URL} after ${max_attempts} attempts." | tee -a "/tmp/${LOGFILE}" - cp -a "/tmp/${LOGFILE}" "${ARTIFACT_DIR}/${namespace}/" - return 1 -} - -install_tekton_pipelines() { - local dir=$1 - - if oc get pods -n "tekton-pipelines" | grep -q "tekton-pipelines"; then - echo "Tekton Pipelines are already installed." - else - echo "Tekton Pipelines is not installed. Installing..." - oc apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml - fi -} - -install_pipelines_operator() { - local dir=$1 - DISPLAY_NAME="Red Hat OpenShift Pipelines" - - if oc get csv -n "openshift-operators" | grep -q "${DISPLAY_NAME}"; then - echo "Red Hat OpenShift Pipelines operator is already installed." - else - echo "Red Hat OpenShift Pipelines operator is not installed. Installing..." 
- oc apply -f "${dir}/resources/pipeline-run/pipelines-operator.yaml" - fi -} - -initiate_deployments() { - - #install_pipelines_operator - install_crunchy_postgres_operator - install_helm - add_helm_repos - - configure_namespace "${NAME_SPACE}" - uninstall_helmchart "${NAME_SPACE}" "${RELEASE_NAME}" - - # Deploy redis cache db. - oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE}" - - cd "${DIR}" - apply_yaml_files "${DIR}" "${NAME_SPACE}" - echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE}" - helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" - - configure_namespace "${NAME_SPACE_POSTGRES_DB}" - configure_namespace "${NAME_SPACE_RBAC}" - configure_external_postgres_db "${NAME_SPACE_RBAC}" - - uninstall_helmchart "${NAME_SPACE_RBAC}" "${RELEASE_NAME_RBAC}" - apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC}" - echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${RELEASE_NAME_RBAC}" - helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" -} - -initiate_rds_deployment() { - local release_name=$1 - local namespace=$2 - configure_namespace "${namespace}" - uninstall_helmchart "${namespace}" "${release_name}" - sed -i "s|POSTGRES_USER:.*|POSTGRES_USER: $RDS_USER|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" - sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: $(echo -n $RDS_PASSWORD | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" - sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: $(echo -n $RDS_1_HOST | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" - oc apply -f "$DIR/resources/postgres-db/postgres-crt-rds.yaml" -n "${namespace}" - oc apply -f "$DIR/resources/postgres-db/postgres-cred.yaml" -n "${namespace}" - oc apply -f "$DIR/resources/postgres-db/dynamic-plugins-root-PVC.yaml" -n "${namespace}" - helm upgrade -i "${release_name}" -n "${namespace}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "$DIR/resources/postgres-db/values-showcase-postgres.yaml" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" -} - -check_and_test() { - local release_name=$1 - local namespace=$2 - if check_backstage_running "${release_name}" "${namespace}"; then - echo "Display pods for verification..." - oc get pods -n "${namespace}" - run_tests "${release_name}" "${namespace}" - else - echo "Backstage is not running. Exiting..." - OVERALL_RESULT=1 - fi - save_all_pod_logs $namespace -} - -# Function to remove finalizers from specific resources in a namespace that are blocking deletion. -remove_finalizers_from_resources() { - local project=$1 - echo "Removing finalizers from resources in namespace ${project} that are blocking deletion." 
- - # Remove finalizers from stuck PipelineRuns and TaskRuns - for resource_type in "pipelineruns.tekton.dev" "taskruns.tekton.dev"; do - for resource in $(oc get "$resource_type" -n "$project" -o name); do - oc patch "$resource" -n "$project" --type='merge' -p '{"metadata":{"finalizers":[]}}' || true - echo "Removed finalizers from $resource in $project." - done - done - - # Check and remove specific finalizers stuck on 'chains.tekton.dev' resources - for chain_resource in $(oc get pipelineruns.tekton.dev,taskruns.tekton.dev -n "$project" -o name); do - oc patch "$chain_resource" -n "$project" --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' || true - echo "Removed Tekton finalizers from $chain_resource in $project." - done -} - -# Function to forcibly delete a namespace stuck in 'Terminating' status -force_delete_namespace() { - local project=$1 - echo "Forcefully deleting namespace ${project}." - oc get namespace "$project" -o json | jq '.spec = {"finalizers":[]}' | oc replace --raw "/api/v1/namespaces/$project/finalize" -f - -} +source "${DIR}/env_variables.sh" +echo "Loaded env_variables.sh" +source "${DIR}/utils.sh" +echo "Loaded utils.sh" +source "${DIR}/jobs/aks.sh" +echo "Loaded aks.sh" +source "${DIR}/jobs/gke.sh" +echo "Loaded gke.sh" +source "${DIR}/jobs/main.sh" +echo "Loaded main.sh" +source "${DIR}/jobs/ocp-v4-15.sh" +echo "Loaded ocp-v4-15.sh" +source "${DIR}/jobs/ocp-v4-16.sh" +echo "Loaded ocp-v4-16.sh" +source "${DIR}/jobs/operator.sh" +echo "Loaded operator.sh" +source "${DIR}/jobs/periodic.sh" +echo "Loaded periodic.sh" main() { echo "Log file: ${LOGFILE}" - set_cluster_info - source "${DIR}/env_variables.sh" - - install_oc - if [[ "$JOB_NAME" == *aks* ]]; then - az_login - az_aks_start "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}" - az_aks_approuting_enable "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}" - az_aks_get_credentials "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}" - elif [[ "$JOB_NAME" == *gke* ]]; then - gcloud_auth "${GKE_SERVICE_ACCOUNT_NAME}" "/tmp/secrets/GKE_SERVICE_ACCOUNT_KEY" - gcloud_gke_get_credentials "${GKE_CLUSTER_NAME}" "${GKE_CLUSTER_REGION}" "${GOOGLE_CLOUD_PROJECT}" - else - oc login --token="${K8S_CLUSTER_TOKEN}" --server="${K8S_CLUSTER_URL}" - fi - echo "OCP version: $(oc version)" - - set_namespace - - API_SERVER_URL=$(oc whoami --show-server) - if [[ "$JOB_NAME" == *aks* ]]; then - export K8S_CLUSTER_ROUTER_BASE=$AKS_INSTANCE_DOMAIN_NAME - elif [[ "$JOB_NAME" == *gke* ]]; then - export K8S_CLUSTER_ROUTER_BASE=$GKE_INSTANCE_DOMAIN_NAME - else - export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') - fi - - echo "K8S_CLUSTER_ROUTER_BASE : $K8S_CLUSTER_ROUTER_BASE" - - ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) - ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) - - if [[ "$JOB_NAME" == *aks* ]]; then - initiate_aks_deployment - check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" - delete_namespace "${NAME_SPACE_K8S}" - initiate_rbac_aks_deployment - check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" - delete_namespace "${NAME_SPACE_RBAC_K8S}" - elif [[ "$JOB_NAME" == *gke* ]]; then - initiate_gke_deployment - check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" - delete_namespace "${NAME_SPACE_K8S}" - initiate_rbac_gke_deployment - check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" - delete_namespace "${NAME_SPACE_RBAC_K8S}" - elif [[ 
"$JOB_NAME" == *auth-providers* ]]; then - run_tests "${AUTH_PROVIDERS_RELEASE}" "${AUTH_PROVIDERS_NAMESPACE}" - else - initiate_deployments - check_and_test "${RELEASE_NAME}" "${NAME_SPACE}" - check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" - # Only test TLS config with RDS and Change configuration at runtime in nightly jobs - if [[ "$JOB_NAME" == *periodic* ]]; then - initiate_rds_deployment "${RELEASE_NAME}" "${NAME_SPACE_RDS}" - check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RDS}" - - # Deploy `showcase-runtime` to run tests that require configuration changes at runtime - configure_namespace "${NAME_SPACE_RUNTIME}" - uninstall_helmchart "${NAME_SPACE_RUNTIME}" "${RELEASE_NAME}" - oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE_RUNTIME}" - apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}" - helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" - check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RUNTIME}" - fi - fi + echo "JOB_NAME : $JOB_NAME" + + case "$JOB_NAME" in + *aks*) + echo "Calling handle_aks" + handle_aks + ;; + *gke*) + echo "Calling handle_gke" + handle_gke + ;; + *periodic*) + echo "Calling handle_periodic" + handle_nightly + ;; + *pull-*-main-e2e-tests*) + echo "Calling handle_main" + handle_main + ;; + *ocp-v4-16*) + echo "Calling handle_ocp_v4_16" + handle_ocp_v4_16 + ;; + *ocp-v4-15*) + echo "Calling handle_ocp_v4_15" + handle_ocp_v4_15 + ;; + *operator*) + echo "Calling Operator" + handle_operator + ;; + esac + +echo "K8S_CLUSTER_ROUTER_BASE : $K8S_CLUSTER_ROUTER_BASE" +echo "Main script completed with result: ${OVERALL_RESULT}" +exit "${OVERALL_RESULT}" - exit "${OVERALL_RESULT}" } main diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 17a235bc55..c8fe52b958 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -1,5 +1,7 @@ #!/bin/sh +set -x + retrieve_pod_logs() { local pod_name=$1; local container=$2; local namespace=$3 echo " Retrieving logs for container: $container" @@ -24,7 +26,7 @@ save_all_pod_logs(){ for init_container in $init_containers; do retrieve_pod_logs $pod_name $init_container $namespace done - + containers=$(kubectl get pod $pod_name -n $namespace -o jsonpath='{.spec.containers[*].name}') for container in $containers; do retrieve_pod_logs $pod_name $container $namespace @@ -48,7 +50,7 @@ droute_send() { local project=$2 local droute_project="droute" METEDATA_OUTPUT="data_router_metadata_output.json" - + oc login --token="${RHDH_PR_OS_CLUSTER_TOKEN}" --server="${RHDH_PR_OS_CLUSTER_URL}" oc whoami --show-server local droute_pod_name=$(oc get pods -n droute --no-headers -o custom-columns=":metadata.name" | grep ubi9-cert-rsync) @@ -272,16 +274,424 @@ install_crunchy_postgres_operator(){ install_subscription crunchy-postgres-operator openshift-operators crunchy-postgres-operator v5 certified-operators } -# Installs the Red Hat OpenShift Pipelines operator if not already installed +add_helm_repos() { + helm version + + local repos=( + "bitnami=https://charts.bitnami.com/bitnami" + "backstage=https://backstage.github.io/charts" + "${HELM_REPO_NAME}=${HELM_REPO_URL}" + ) + + for repo in "${repos[@]}"; do + local key="${repo%%=*}" + local value="${repo##*=}" + + if ! 
+
+    if ! helm repo list | grep -q "^$key"; then
+      helm repo add "$key" "$value"
+    else
+      echo "Repository $key already exists - skipping add; it is refreshed by 'helm repo update' below."
+    fi
+  done
+
+  helm repo update
+}
+
+uninstall_helmchart() {
+  local project=$1
+  local release=$2
+  if helm list -n "${project}" | grep -q "${release}"; then
+    echo "Chart already exists. Removing it before install."
+    helm uninstall "${release}" -n "${project}"
+  fi
+}
+
+configure_namespace() {
+  local project=$1
+  echo "Deleting and recreating namespace: $project"
+  delete_namespace "$project"
+
+  if ! oc create namespace "${project}"; then
+    echo "Error: Failed to create namespace ${project}" >&2
+    exit 1
+  fi
+  if ! oc config set-context --current --namespace="${project}"; then
+    echo "Error: Failed to set context for namespace ${project}" >&2
+    exit 1
+  fi
+
+  echo "Namespace ${project} is ready."
+}
+
+delete_namespace() {
+  local project=$1
+  if oc get namespace "$project" >/dev/null 2>&1; then
+    echo "Namespace ${project} exists. Attempting to delete..."
+
+    # Remove blocking finalizers
+    remove_finalizers_from_resources "$project"
+
+    # Attempt to delete the namespace
+    oc delete namespace "$project" --grace-period=0 --force || true
+
+    # Check if namespace is still stuck in 'Terminating' and force removal if necessary
+    if oc get namespace "$project" -o jsonpath='{.status.phase}' | grep -q 'Terminating'; then
+      echo "Namespace ${project} is stuck in Terminating. Forcing deletion..."
+      force_delete_namespace "$project"
+    fi
+  fi
+}
+
+configure_external_postgres_db() {
+  local project=$1
+  oc apply -f "${DIR}/resources/postgres-db/postgres.yaml" --namespace="${NAME_SPACE_POSTGRES_DB}"
+  sleep 5
+
+  # (the 'postgress-external-db' spelling below matches the resource names created by postgres.yaml)
+  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.ca\.crt}' | base64 --decode > postgres-ca
+  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.crt}' | base64 --decode > postgres-tls-crt
+  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.key}' | base64 --decode > postgres-tls-key
+
+  oc create secret generic postgress-external-db-cluster-cert \
+    --from-file=ca.crt=postgres-ca \
+    --from-file=tls.crt=postgres-tls-crt \
+    --from-file=tls.key=postgres-tls-key \
+    --dry-run=client -o yaml | oc apply -f - --namespace="${project}"
+
+  POSTGRES_PASSWORD=$(oc get secret/postgress-external-db-pguser-janus-idp -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath={.data.password})
+  sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
+  POSTGRES_HOST=$(echo -n "postgress-external-db-primary.$NAME_SPACE_POSTGRES_DB.svc.cluster.local" | base64 | tr -d '\n')
+  sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: ${POSTGRES_HOST}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
+  oc apply -f "${DIR}/resources/postgres-db/postgres-cred.yaml" --namespace="${project}"
+}
+
+set_github_app_3_credentials() {
+  GITHUB_APP_APP_ID=$GITHUB_APP_3_APP_ID
+  GITHUB_APP_CLIENT_ID=$GITHUB_APP_3_CLIENT_ID
+  GITHUB_APP_PRIVATE_KEY=$GITHUB_APP_3_PRIVATE_KEY
+  GITHUB_APP_CLIENT_SECRET=$GITHUB_APP_3_CLIENT_SECRET
+
+  export GITHUB_APP_APP_ID
+  export GITHUB_APP_CLIENT_ID
+  export GITHUB_APP_PRIVATE_KEY
+  export GITHUB_APP_CLIENT_SECRET
+  echo "GitHub App 3 credentials set for current job."
+} + +set_github_app_4_credentials() { + GITHUB_APP_APP_ID=$(cat /tmp/secrets/GITHUB_APP_4_APP_ID) + GITHUB_APP_CLIENT_ID=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_ID) + GITHUB_APP_PRIVATE_KEY=$(cat /tmp/secrets/GITHUB_APP_4_PRIVATE_KEY) + GITHUB_APP_CLIENT_SECRET=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_SECRET) + + export GITHUB_APP_APP_ID + export GITHUB_APP_CLIENT_ID + export GITHUB_APP_PRIVATE_KEY + export GITHUB_APP_CLIENT_SECRET + echo "GitHub App 4 credentials set for current job." +} + +apply_yaml_files() { + local dir=$1 + local project=$2 + echo "Applying YAML files to namespace ${project}" + + oc config set-context --current --namespace="${project}" + + local files=( + "$dir/resources/service_account/service-account-rhdh.yaml" + "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml" + "$dir/resources/cluster_role/cluster-role-k8s.yaml" + "$dir/resources/cluster_role/cluster-role-ocm.yaml" + "$dir/auth/secrets-rhdh-secrets.yaml" + ) + + for file in "${files[@]}"; do + sed -i "s/namespace:.*/namespace: ${project}/g" "$file" + done + + DH_TARGET_URL=$(echo -n "test-backstage-customization-provider-${project}.${K8S_CLUSTER_ROUTER_BASE}" | base64 -w 0) + + for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN DH_TARGET_URL; do + sed -i "s|${key}:.*|${key}: ${!key}|g" "$dir/auth/secrets-rhdh-secrets.yaml" + done + + oc apply -f "$dir/resources/service_account/service-account-rhdh.yaml" --namespace="${project}" + oc apply -f "$dir/auth/service-account-rhdh-secret.yaml" --namespace="${project}" + oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}" + + oc apply -f "$dir/resources/cluster_role/cluster-role-k8s.yaml" --namespace="${project}" + oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml" --namespace="${project}" + oc apply -f "$dir/resources/cluster_role/cluster-role-ocm.yaml" --namespace="${project}" + oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-ocm.yaml" --namespace="${project}" + + escaped_url=$(printf '%s\n' "${ENCODED_API_SERVER_URL}" | sed 's/[\/&]/\\&/g') + sed -i "s/K8S_CLUSTER_API_SERVER_URL:.*/K8S_CLUSTER_API_SERVER_URL: ${escaped_url}/g" "$dir/auth/secrets-rhdh-secrets.yaml" \ + && echo "Updated K8S_CLUSTER_API_SERVER_URL in secrets file." \ + || echo "Failed to update K8S_CLUSTER_API_SERVER_URL." 
>&2
+
+  sed -i "s/K8S_CLUSTER_NAME:.*/K8S_CLUSTER_NAME: ${ENCODED_CLUSTER_NAME}/g" "$dir/auth/secrets-rhdh-secrets.yaml"
+
+  # NOTE: secret_name is assumed to be provided by the sourced environment; it is not set in this function.
+  token=$(oc get secret "${secret_name}" -n "${project}" -o=jsonpath='{.data.token}')
+  sed -i "s/OCM_CLUSTER_TOKEN: .*/OCM_CLUSTER_TOKEN: ${token}/" "$dir/auth/secrets-rhdh-secrets.yaml"
+
+  # Select the configuration file based on the namespace or job
+  config_file=$(select_config_map_file)
+  # Apply the ConfigMap with the correct file
+  if [[ "${project}" == *showcase-k8s* ]]; then
+    create_app_config_map_k8s "$config_file" "$project"
+  else
+    create_app_config_map "$config_file" "$project"
+  fi
+  oc create configmap rbac-policy \
+    --from-file="rbac-policy.csv"="$dir/resources/config_map/rbac-policy.csv" \
+    --namespace="$project" \
+    --dry-run=client -o yaml | oc apply -f -
+
+  # Re-apply the secrets now that the sed edits above are in place.
+  oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}"
+
+}
+
+deploy_test_backstage_provider() {
+  local project=$1
+  echo "Deploying test-backstage-customization-provider in namespace ${project}"
+
+  # Check if the buildconfig already exists
+  if ! oc get buildconfig test-backstage-customization-provider -n "${project}" >/dev/null 2>&1; then
+    echo "Creating new app for test-backstage-customization-provider"
+    oc new-app https://github.com/janus-qe/test-backstage-customization-provider --namespace="${project}"
+  else
+    echo "BuildConfig for test-backstage-customization-provider already exists in ${project}. Skipping new-app creation."
+  fi
+
+  # Ensure the service exists
+  if ! oc get service test-backstage-customization-provider -n "${project}" >/dev/null 2>&1; then
+    echo "Exposing service for test-backstage-customization-provider"
+    oc expose svc/test-backstage-customization-provider --namespace="${project}"
+  else
+    echo "Service test-backstage-customization-provider is already exposed in ${project}."
+  fi
+}
+
+create_app_config_map() {
+  local config_file=$1
+  local project=$2
+
+  oc create configmap app-config-rhdh \
+    --from-file="app-config-rhdh.yaml"="$config_file" \
+    --namespace="$project" \
+    --dry-run=client -o yaml | oc apply -f -
+}
+
+# Echoes the app-config path; relies on the caller's $project and $dir (bash dynamic scoping).
+select_config_map_file() {
+  if [[ "${project}" == *rbac* ]]; then
+    echo "$dir/resources/config_map/app-config-rhdh-rbac.yaml"
+  else
+    echo "$dir/resources/config_map/app-config-rhdh.yaml"
+  fi
+}
+
+create_app_config_map_k8s() {
+  local config_file=$1
+  local project=$2
+
+  echo "Creating app-config ConfigMap for AKS/GKE in namespace ${project}"
+
+  yq 'del(.backend.cache)' "$config_file" \
+    | oc create configmap app-config-rhdh \
+      --from-file="app-config-rhdh.yaml"="/dev/stdin" \
+      --namespace="${project}" \
+      --dry-run=client -o yaml \
+    | oc apply -f -
+}
+
+run_tests() {
+  local release_name=$1
+  local project=$2
+  project=${project%-pr-*} # Strip any -pr- suffix set for main-branch PRs.
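+  # e.g. a hypothetical project "showcase-rbac-pr-123" becomes "showcase-rbac",
+  # so PR jobs reuse the same yarn test project as the corresponding branch job.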
+  cd "${DIR}/../../e2e-tests"
+  yarn install
+  yarn playwright install chromium
+
+  Xvfb :99 &
+  export DISPLAY=:99
+
+  (
+    set -e
+    echo "Using PR container image: ${TAG_NAME}"
+    yarn "$project"
+  ) 2>&1 | tee "/tmp/${LOGFILE}"
+
+  # PIPESTATUS[0] holds the exit code of the yarn run inside the pipeline (tee would otherwise mask it).
+  local RESULT=${PIPESTATUS[0]}
+
+  pkill Xvfb
+
+  mkdir -p "${ARTIFACT_DIR}/${project}/test-results"
+  mkdir -p "${ARTIFACT_DIR}/${project}/attachments/screenshots"
+  cp -a /tmp/backstage-showcase/e2e-tests/test-results/* "${ARTIFACT_DIR}/${project}/test-results"
+  cp -a /tmp/backstage-showcase/e2e-tests/${JUNIT_RESULTS} "${ARTIFACT_DIR}/${project}/${JUNIT_RESULTS}"
+
+  if [ -d "/tmp/backstage-showcase/e2e-tests/screenshots" ]; then
+    cp -a /tmp/backstage-showcase/e2e-tests/screenshots/* "${ARTIFACT_DIR}/${project}/attachments/screenshots/"
+  fi
+
+  if [ -d "/tmp/backstage-showcase/e2e-tests/auth-providers-logs" ]; then
+    cp -a /tmp/backstage-showcase/e2e-tests/auth-providers-logs/* "${ARTIFACT_DIR}/${project}/"
+  fi
+
+  ansi2html <"/tmp/${LOGFILE}" >"/tmp/${LOGFILE}.html"
+  cp -a "/tmp/${LOGFILE}.html" "${ARTIFACT_DIR}/${project}"
+  cp -a /tmp/backstage-showcase/e2e-tests/playwright-report/* "${ARTIFACT_DIR}/${project}"
+
+  droute_send "${release_name}" "${project}"
+
+  echo "${project} RESULT: ${RESULT}"
+  if [ "${RESULT}" -ne 0 ]; then
+    OVERALL_RESULT=1
+  fi
+}
+
+check_backstage_running() {
+  local release_name=$1
+  local namespace=$2
+  local url=$3
+
+  local max_attempts=30
+  local wait_seconds=30
+
+  echo "Checking if Backstage is up and running at ${url}"
+
+  for ((i = 1; i <= max_attempts; i++)); do
+    local http_status
+    http_status=$(curl --insecure -I -s -o /dev/null -w "%{http_code}" "${url}")
+
+    if [ "${http_status}" -eq 200 ]; then
+      echo "Backstage is up and running!"
+      export BASE_URL="${url}"
+      echo "######## BASE URL ########"
+      echo "${BASE_URL}"
+      return 0
+    else
+      echo "Attempt ${i} of ${max_attempts}: Backstage not yet available (HTTP Status: ${http_status})"
+      sleep "${wait_seconds}"
+    fi
+  done
+
+  echo "Failed to reach Backstage at ${url} after ${max_attempts} attempts." | tee -a "/tmp/${LOGFILE}"
+  cp -a "/tmp/${LOGFILE}" "${ARTIFACT_DIR}/${namespace}/"
+  return 1
+}
+
+install_tekton_pipelines() {
+  local dir=$1
+
+  if oc get pods -n "tekton-pipelines" | grep -q "tekton-pipelines"; then
+    echo "Tekton Pipelines is already installed."
+  else
+    echo "Tekton Pipelines is not installed. Installing..."
+    oc apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
+  fi
+}
+
 install_pipelines_operator() {
+  local dir=$1
   DISPLAY_NAME="Red Hat OpenShift Pipelines"
-  # Check if operator is already installed
+
   if oc get csv -n "openshift-operators" | grep -q "${DISPLAY_NAME}"; then
     echo "Red Hat OpenShift Pipelines operator is already installed."
   else
     echo "Red Hat OpenShift Pipelines operator is not installed. Installing..."
-    # Install the operator and wait for deployment
-    install_subscription openshift-pipelines-operator openshift-operators openshift-pipelines-operator-rh latest redhat-operators
-    wait_for_deployment "openshift-operators" "pipelines"
+    oc apply -f "${dir}/resources/pipeline-run/pipelines-operator.yaml"
+  fi
+}
+
+initiate_deployments() {
+
+  install_crunchy_postgres_operator
+  add_helm_repos
+
+  uninstall_helmchart "${NAME_SPACE}" "${RELEASE_NAME}"
+
+  # Deploy redis cache db.
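+  # (the Helm value files are expected to point the showcase backend cache at this Redis instance;
+  # the AKS/GKE config strips .backend.cache instead, see create_app_config_map_k8s above)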
+ oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE}" + + cd "${DIR}" + apply_yaml_files "${DIR}" "${NAME_SPACE}" + echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE}" + helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" + + configure_namespace "${NAME_SPACE_POSTGRES_DB}" + configure_namespace "${NAME_SPACE_RBAC}" + configure_external_postgres_db "${NAME_SPACE_RBAC}" + + uninstall_helmchart "${NAME_SPACE_RBAC}" "${RELEASE_NAME_RBAC}" + apply_yaml_files "${DIR}" "${NAME_SPACE_RBAC}" + echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${RELEASE_NAME_RBAC}" + helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" +} + +initiate_rds_deployment() { + local release_name=$1 + local namespace=$2 + configure_namespace "${namespace}" + uninstall_helmchart "${namespace}" "${release_name}" + sed -i "s|POSTGRES_USER:.*|POSTGRES_USER: $RDS_USER|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" + sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: $(echo -n $RDS_PASSWORD | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" + sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: $(echo -n $RDS_1_HOST | base64 -w 0)|g" "${DIR}/resources/postgres-db/postgres-cred.yaml" + oc apply -f "$DIR/resources/postgres-db/postgres-crt-rds.yaml" -n "${namespace}" + oc apply -f "$DIR/resources/postgres-db/postgres-cred.yaml" -n "${namespace}" + oc apply -f "$DIR/resources/postgres-db/dynamic-plugins-root-PVC.yaml" -n "${namespace}" + helm upgrade -i "${release_name}" -n "${namespace}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "$DIR/resources/postgres-db/values-showcase-postgres.yaml" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}" +} + +check_and_test() { + local release_name=$1 + local namespace=$2 + local url=$3 + if check_backstage_running "${release_name}" "${namespace}" "${url}"; then + echo "Display pods for verification..." + oc get pods -n "${namespace}" + run_tests "${release_name}" "${namespace}" + else + echo "Backstage is not running. Exiting..." + OVERALL_RESULT=1 fi + save_all_pod_logs $namespace } + +# Function to remove finalizers from specific resources in a namespace that are blocking deletion. +remove_finalizers_from_resources() { + local project=$1 + echo "Removing finalizers from resources in namespace ${project} that are blocking deletion." + + # Remove finalizers from stuck PipelineRuns and TaskRuns + for resource_type in "pipelineruns.tekton.dev" "taskruns.tekton.dev"; do + for resource in $(oc get "$resource_type" -n "$project" -o name); do + oc patch "$resource" -n "$project" --type='merge' -p '{"metadata":{"finalizers":[]}}' || true + echo "Removed finalizers from $resource in $project." 
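+      # '|| true' above keeps the cleanup loop going even if a patch fails or the resource is already gone.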
+ done + done + + # Check and remove specific finalizers stuck on 'chains.tekton.dev' resources + for chain_resource in $(oc get pipelineruns.tekton.dev,taskruns.tekton.dev -n "$project" -o name); do + oc patch "$chain_resource" -n "$project" --type='json' -p='[{"op": "remove", "path": "/metadata/finalizers"}]' || true + echo "Removed Tekton finalizers from $chain_resource in $project." + done +} + +# Function to forcibly delete a namespace stuck in 'Terminating' status +force_delete_namespace() { + local project=$1 + echo "Forcefully deleting namespace ${project}." + oc get namespace "$project" -o json | jq '.spec = {"finalizers":[]}' | oc replace --raw "/api/v1/namespaces/$project/finalize" -f - +} + +oc_login() { + export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL) + export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN) + + oc login --token="${K8S_CLUSTER_TOKEN}" --server="${K8S_CLUSTER_URL}" + echo "OCP version: $(oc version)" + export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//') +} + + diff --git a/e2e-tests/playwright.config.ts b/e2e-tests/playwright.config.ts index 6c880fa068..d60763886f 100644 --- a/e2e-tests/playwright.config.ts +++ b/e2e-tests/playwright.config.ts @@ -53,6 +53,7 @@ export default defineConfig({ "**/playwright/e2e/plugins/bulk-import.spec.ts", "**/playwright/e2e/verify-tls-config-health-check.spec.ts", "**/playwright/e2e/configuration-test/config-map.spec.ts", + "**/playwright/e2e/plugins/tekton/tekton.spec.ts", ], }, { diff --git a/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts b/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts index 845ef4b877..b29ffd22ff 100644 --- a/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts +++ b/e2e-tests/playwright/e2e/plugins/topology/topology.spec.ts @@ -3,7 +3,8 @@ import { Common } from "../../../utils/common"; import { UIhelper } from "../../../utils/ui-helper"; import { Catalog } from "../../../support/pages/catalog"; -test.describe("Test Topology Plugin", () => { +// Test disabled due to comments in JIRA ticket RHIDP-3437 +test.describe.skip("Test Topology Plugin", () => { let common: Common; let uiHelper: UIhelper; let catalog: Catalog; From d871ec6f3b2f6c7c224523b86c40bb8d0a02d4a0 Mon Sep 17 00:00:00 2001 From: Gustavo Lira e Silva Date: Fri, 6 Dec 2024 20:50:18 -0300 Subject: [PATCH 6/6] Reconfigure namespace setup in deployment script (#2071) --- .ibm/pipelines/utils.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index c8fe52b958..65f0632800 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -608,8 +608,7 @@ initiate_deployments() { install_crunchy_postgres_operator add_helm_repos - - uninstall_helmchart "${NAME_SPACE}" "${RELEASE_NAME}" + configure_namespace ${NAME_SPACE} # Deploy redis cache db. oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE}"