diff --git a/.github/workflows/pr_check_workflow.yml b/.github/workflows/pr_check_workflow.yml
index a7f36b24d478..c4f1f0de093c 100644
--- a/.github/workflows/pr_check_workflow.yml
+++ b/.github/workflows/pr_check_workflow.yml
@@ -11,7 +11,6 @@ on:
branches: [ '**', '!feature/**' ]
env:
- CACHE_NAME: osd-node-modules
TEST_BROWSER_HEADLESS: 1
CI: 1
GCS_UPLOAD_PREFIX: fake
@@ -26,108 +25,43 @@ jobs:
runs-on: ubuntu-latest
name: Build and Verify
steps:
- # Access a cache of set results from a previous run of the job
- # This is to prevent re-running steps that were already successful since it is not native to github actions
- # Can be used to verify flaky steps with reduced times
- - name: Restore the cached run
- uses: actions/cache@v2
- with:
- path: |
- job_successful
- linter_results
- unit_tests_results
- integration_tests_results
- key: ${{ github.run_id }}-${{ github.job }}-${{ github.sha }}
- restore-keys: |
- ${{ github.run_id }}-${{ github.job }}-${{ github.sha }}
-
- - name: Get if previous job was successful
- id: job_successful
- run: cat job_successful 2>/dev/null || echo 'false'
-
- - name: Get the previous linter results
- id: linter_results
- run: cat linter_results 2>/dev/null || echo 'default'
-
- - name: Get the previous unit tests results
- id: unit_tests_results
- run: cat unit_tests_results 2>/dev/null || echo 'default'
-
- - name: Get the previous integration tests results
- id: integration_tests_results
- run: cat integration_tests_results 2>/dev/null || echo 'default'
- name: Checkout code
- if: steps.job_successful.outputs.job_successful != 'true'
uses: actions/checkout@v2
- name: Setup Node
- if: steps.job_successful.outputs.job_successful != 'true'
uses: actions/setup-node@v2
with:
node-version-file: ".nvmrc"
registry-url: 'https://registry.npmjs.org'
- name: Setup Yarn
- if: steps.job_successful.outputs.job_successful != 'true'
run: |
npm uninstall -g yarn
npm i -g yarn@1.22.10
- name: Run bootstrap
- if: steps.job_successful.outputs.job_successful != 'true'
run: yarn osd bootstrap
- name: Run linter
- if: steps.linter_results.outputs.linter_results != 'success'
id: linter
run: yarn lint
- # Runs unit tests while limiting workers because github actions will spawn more than it can handle and crash
- # Continues on error but will create a comment on the pull request if this step failed.
- name: Run unit tests
- if: steps.unit_tests_results.outputs.unit_tests_results != 'success'
id: unit-tests
- continue-on-error: true
- run: node scripts/jest --ci --colors --maxWorkers=10
- env:
- SKIP_BAD_APPLES: true
-
- - run: echo Unit tests completed unsuccessfully. However, unit tests are inconsistent on the CI so please verify locally with `yarn test:jest`.
- if: steps.unit_tests_results.outputs.unit_tests_results != 'success' && steps.unit-tests.outcome != 'success'
-
- # TODO: This gets rejected, we need approval to add this
- # - name: Add comment if unit tests did not succeed
- # if: steps.unit_tests_results.outputs.unit_tests_results != 'success' && steps.unit-tests.outcome != 'success'
- # uses: actions/github-script@v5
- # with:
- # github-token: ${{ secrets.GITHUB_TOKEN }}
- # script: |
- # github.rest.issues.createComment({
- # issue_number: context.issue.number,
- # owner: context.repo.owner,
- # repo: context.repo.repo,
- # body: 'Unit tests completed unsuccessfully. However, unit tests are inconsistent on the CI so please verify locally with `yarn test:jest`.'
- # })
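+        # test:jest:ci (package.json) runs "node scripts/jest --ci --colors --runInBand";
+        # running in band keeps the job from spawning more Jest workers than the runner can handle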
+ run: yarn test:jest:ci
+
+ - name: Run mocha tests
+ id: mocha-tests
+ run: yarn test:mocha
- name: Run integration tests
- if: steps.integration_tests_results.outputs.integration_tests_results != 'success'
id: integration-tests
- run: node scripts/jest_integration --ci --colors --max-old-space-size=5120
-
- # Set cache if linter, unit tests, and integration tests were successful then the job will be marked successful
- # Sets individual results to empower re-runs of the same build without re-running successful steps.
- - if: |
- (steps.linter.outcome == 'success' || steps.linter.outcome == 'skipped') &&
- (steps.unit-tests.outcome == 'success' || steps.unit-tests.outcome == 'skipped') &&
- (steps.integration-tests.outcome == 'success' || steps.integration-tests.outcome == 'skipped')
- run: echo "::set-output name=job_successful::true" > job_successful
- - if: steps.linter.outcome == 'success' || steps.linter.outcome == 'skipped'
- run: echo "::set-output name=linter_results::success" > linter_results
- - if: steps.unit-tests.outcome == 'success' || steps.unit-tests.outcome == 'skipped'
- run: echo "::set-output name=unit_tests_results::success" > unit_tests_results
- - if: steps.integration-tests.outcome == 'success' || steps.integration-tests.outcome == 'skipped'
- run: echo "::set-output name=integration_tests_results::success" > integration_tests_results
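+        # test:jest_integration:ci (package.json) carries the --ci --colors --max-old-space-size=5120 flags previously passed inline here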
+ run: yarn test:jest_integration:ci
+
functional-tests:
needs: [ build-lint-test ]
runs-on: ubuntu-latest
@@ -138,69 +72,32 @@ jobs:
steps:
- run: echo Running functional tests for ciGroup${{ matrix.group }}
- # Access a cache of set results from a previous run of the job
- # This is to prevent re-running a CI group that was already successful since it is not native to github actions
- # Can be used to verify flaky steps with reduced times
- - name: Restore the cached run
- uses: actions/cache@v2
- with:
- path: |
- ftr_tests_results
- key: ${{ github.run_id }}-${{ github.job }}-${{ matrix.group }}-${{ github.sha }}
- restore-keys: |
- ${{ github.run_id }}-${{ github.job }}-${{ matrix.group }}-${{ github.sha }}
-
-      - name: Get the cached tests results
-        id: ftr_tests_results
-        run: cat ftr_tests_results 2>/dev/null || echo 'default'
- name: Checkout code
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
uses: actions/checkout@v2
- name: Setup Node
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
uses: actions/setup-node@v2
with:
node-version-file: ".nvmrc"
registry-url: 'https://registry.npmjs.org'
- name: Setup Yarn
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: |
npm uninstall -g yarn
npm i -g yarn@1.22.10
- - name: Get cache path
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
- id: cache-path
- run: echo "::set-output name=CACHE_DIR::$(yarn cache dir)"
-
- - name: Setup cache
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
- uses: actions/cache@v2
- with:
- path: ${{ steps.cache-path.outputs.CACHE_DIR }}
- key: ${{ runner.os }}-yarn-${{ env.CACHE_NAME }}-${{ hashFiles('**/yarn.lock') }}
- restore-keys: |
- ${{ runner.os }}-yarn-${{ env.CACHE_NAME }}-
- ${{ runner.os }}-yarn-
- ${{ runner.os }}-
-
# github virtual env is the latest chrome
- name: Setup chromedriver
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: yarn add --dev chromedriver@97.0.0
- name: Run bootstrap
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: yarn osd bootstrap
- name: Build plugins
- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: node scripts/build_opensearch_dashboards_platform_plugins --no-examples --workers 10
- - if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
+ - name: Run CI test group ${{ matrix.group }}
id: ftr-tests
run: node scripts/functional_tests.js --config test/functional/config.js --include ciGroup${{ matrix.group }}
env:
@@ -208,6 +105,3 @@ jobs:
CI_PARALLEL_PROCESS_NUMBER: ciGroup${{ matrix.group }}
JOB: ci${{ matrix.group }}
CACHE_DIR: ciGroup${{ matrix.group }}
-
- - if: steps.ftr-tests.outcome == 'success' || steps.ftr-tests.outcome == 'skipped'
- run: echo "::set-output name=ftr_tests_results::success" > ftr_tests_results
diff --git a/package.json b/package.json
index ea0276fea51e..4346be0700b4 100644
--- a/package.json
+++ b/package.json
@@ -43,7 +43,9 @@
"test": "grunt test",
"test:bwc": "./scripts/bwctest_osd.sh",
"test:jest": "node scripts/jest",
+ "test:jest:ci": "node scripts/jest --ci --colors --runInBand",
"test:jest_integration": "node scripts/jest_integration",
+ "test:jest_integration:ci": "node scripts/jest_integration --ci --colors --max-old-space-size=5120",
"test:mocha": "node scripts/mocha",
"test:mocha:coverage": "grunt test:mochaCoverage",
"test:ftr": "node scripts/functional_tests",
diff --git a/packages/osd-pm/src/run.test.ts b/packages/osd-pm/src/run.test.ts
index f0081f8bc9af..6fc316971677 100644
--- a/packages/osd-pm/src/run.test.ts
+++ b/packages/osd-pm/src/run.test.ts
@@ -37,8 +37,6 @@ import { runCommand } from './run';
import { Project } from './utils/project';
import { log } from './utils/log';
-const testif = process.env.SKIP_BAD_APPLES === 'true' ? test.skip : test;
-
log.setLogLevel('silent');
const rootPath = resolve(`${__dirname}/utils/__fixtures__/opensearch-dashboards`);
@@ -72,14 +70,14 @@ beforeEach(() => {
};
});
-testif('passes all found projects to the command if no filter is specified', async () => {
+test('passes all found projects to the command if no filter is specified', async () => {
await runCommand(command, config);
expect(command.run).toHaveBeenCalledTimes(1);
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});
-testif('excludes project if single `exclude` filter is specified', async () => {
+test('excludes project if single `exclude` filter is specified', async () => {
await runCommand(command, {
...config,
options: { exclude: 'foo' },
@@ -89,7 +87,7 @@ testif('excludes project if single `exclude` filter is specified', async () => {
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});
-testif('excludes projects if multiple `exclude` filter are specified', async () => {
+test('excludes projects if multiple `exclude` filter are specified', async () => {
await runCommand(command, {
...config,
options: { exclude: ['foo', 'bar', 'baz'] },
@@ -99,7 +97,7 @@ testif('excludes projects if multiple `exclude` filter are specified', async ()
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});
-testif('includes single project if single `include` filter is specified', async () => {
+test('includes single project if single `include` filter is specified', async () => {
await runCommand(command, {
...config,
options: { include: 'foo' },
@@ -109,7 +107,7 @@ testif('includes single project if single `include` filter is specified', async
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});
-testif('includes only projects specified in multiple `include` filters', async () => {
+test('includes only projects specified in multiple `include` filters', async () => {
await runCommand(command, {
...config,
options: { include: ['foo', 'bar', 'baz'] },
@@ -119,7 +117,7 @@ testif('includes only projects specified in multiple `include` filters', async (
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});
-testif('respects both `include` and `exclude` filters if specified at the same time', async () => {
+test('respects both `include` and `exclude` filters if specified at the same time', async () => {
await runCommand(command, {
...config,
options: { include: ['foo', 'bar', 'baz'], exclude: 'bar' },
@@ -129,7 +127,7 @@ testif('respects both `include` and `exclude` filters if specified at the same t
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});
-testif('does not run command if all projects are filtered out', async () => {
+test('does not run command if all projects are filtered out', async () => {
const mockProcessExit = jest.spyOn(process, 'exit').mockReturnValue(undefined as never);
await runCommand(command, {
diff --git a/src/plugins/embeddable/public/lib/embeddables/error_embeddable.test.tsx b/src/plugins/embeddable/public/lib/embeddables/error_embeddable.test.tsx
index e8dddeadfa30..2eee0f32646c 100644
--- a/src/plugins/embeddable/public/lib/embeddables/error_embeddable.test.tsx
+++ b/src/plugins/embeddable/public/lib/embeddables/error_embeddable.test.tsx
@@ -34,9 +34,7 @@ import { waitFor, render } from '@testing-library/react';
import { ErrorEmbeddable } from './error_embeddable';
import { EmbeddableRoot } from './embeddable_root';
-const testif = process.env.SKIP_BAD_APPLES === 'true' ? test.skip : test;
-
-testif('ErrorEmbeddable renders an embeddable', async () => {
+test('ErrorEmbeddable renders an embeddable', async () => {
const embeddable = new ErrorEmbeddable('some error occurred', { id: '123', title: 'Error' });
  const { getByTestId, getByText } = render(<EmbeddableRoot embeddable={embeddable} />);
@@ -45,7 +43,7 @@ testif('ErrorEmbeddable renders an embeddable', async () => {
expect(getByText(/some error occurred/i)).toBeVisible();
});
-testif('ErrorEmbeddable renders an embeddable with markdown message', async () => {
+test('ErrorEmbeddable renders an embeddable with markdown message', async () => {
const error = '[some link](http://localhost:5601/takeMeThere)';
const embeddable = new ErrorEmbeddable(error, { id: '123', title: 'Error' });
  const { getByTestId, getByText } = render(<EmbeddableRoot embeddable={embeddable} />);
diff --git a/src/plugins/vis_type_markdown/public/markdown_vis_controller.test.tsx b/src/plugins/vis_type_markdown/public/markdown_vis_controller.test.tsx
index ecae64971128..082ae536f0d0 100644
--- a/src/plugins/vis_type_markdown/public/markdown_vis_controller.test.tsx
+++ b/src/plugins/vis_type_markdown/public/markdown_vis_controller.test.tsx
@@ -34,9 +34,7 @@ import React from 'react';
import { waitFor, render } from '@testing-library/react';
import MarkdownVisComponent from './markdown_vis_controller';
-const describeif = process.env.SKIP_BAD_APPLES === 'true' ? describe.skip : describe;
-
-describeif('markdown vis controller', () => {
+describe('markdown vis controller', () => {
it('should set html from markdown params', async () => {
const vis = {
params: {
diff --git a/src/plugins/vis_type_tagcloud/public/components/tag_cloud_visualization.test.js b/src/plugins/vis_type_tagcloud/public/components/tag_cloud_visualization.test.js
index c2c4f9ccbbc0..0a0d025f1d4c 100644
--- a/src/plugins/vis_type_tagcloud/public/components/tag_cloud_visualization.test.js
+++ b/src/plugins/vis_type_tagcloud/public/components/tag_cloud_visualization.test.js
@@ -37,10 +37,9 @@ import { setFormatService } from '../services';
import { dataPluginMock } from '../../../data/public/mocks';
import { setHTMLElementOffset, setSVGElementGetBBox } from '../../../../test_utils/public';
-const describeif = process.env.SKIP_BAD_APPLES === 'true' ? describe.skip : describe;
const seedColors = ['#00a69b', '#57c17b', '#6f87d8', '#663db8', '#bc52bc', '#9e3533', '#daa05d'];
-describeif('TagCloudVisualizationTest', () => {
+describe('TagCloudVisualizationTest', () => {
let domNode;
let visParams;
let SVGElementGetBBoxSpyInstance;
diff --git a/src/plugins/visualize/public/application/utils/use/use_visualize_app_state.test.ts b/src/plugins/visualize/public/application/utils/use/use_visualize_app_state.test.ts
index 776d814f6225..2cefb205c8c9 100644
--- a/src/plugins/visualize/public/application/utils/use/use_visualize_app_state.test.ts
+++ b/src/plugins/visualize/public/application/utils/use/use_visualize_app_state.test.ts
@@ -40,13 +40,11 @@ import { visualizeAppStateStub } from '../stubs';
import { VisualizeConstants } from '../../visualize_constants';
import { createVisualizeServicesMock } from '../mocks';
-const describeif = process.env.SKIP_BAD_APPLES === 'true' ? describe.skip : describe;
-
jest.mock('../utils');
jest.mock('../create_visualize_app_state');
jest.mock('../../../../../data/public');
-describeif('useVisualizeAppState', () => {
+describe('useVisualizeAppState', () => {
const { visStateToEditorState } = jest.requireMock('../utils');
const { createVisualizeAppState } = jest.requireMock('../create_visualize_app_state');
const { connectToQueryState } = jest.requireMock('../../../../../data/public');