Runs GitHub workflow unit tests in band (opensearch-project#1306)
* Removes caching from GitHub test workflow to simplify.
* Removes "bad apples" check from GitHub workflow since those tests now
  pass consistently.
* Runs unit tests in band to reduce execution time from ~90 minutes to
  ~20 minutes.
* Removes the logic that ignored failing unit tests.
* Adds mocha tests to the workflow.
* Adds CI-specific scripts to simplify the commands.
  * `yarn test:jest:ci` and `yarn test:jest_integration:ci` are now used
    in the workflow.

Resolves opensearch-project#1231

Signed-off-by: Tommy Markley <[email protected]>
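
A minimal sketch of the test steps in .github/workflows/pr_check_workflow.yml after this change, assembled from the diff below; the step names and ids are those kept in the workflow, the result-cache guards and continue-on-error are gone, and the comments note what the new yarn scripts expand to per the package.json change:

    - name: Run unit tests
      id: unit-tests
      # expands to: node scripts/jest --ci --colors --runInBand
      run: yarn test:jest:ci

    - name: Run mocha tests
      id: mocha-tests
      run: yarn test:mocha

    - name: Run integration tests
      id: integration-tests
      # expands to: node scripts/jest_integration --ci --colors --max-old-space-size=5120
      run: yarn test:jest_integration:ci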
Tommy Markley authored Mar 4, 2022
1 parent 082f44a commit fef8af0
Showing 7 changed files with 22 additions and 135 deletions.
122 changes: 8 additions & 114 deletions .github/workflows/pr_check_workflow.yml
@@ -11,7 +11,6 @@ on:
branches: [ '**', '!feature/**' ]

env:
CACHE_NAME: osd-node-modules
TEST_BROWSER_HEADLESS: 1
CI: 1
GCS_UPLOAD_PREFIX: fake
@@ -26,108 +25,40 @@ jobs:
runs-on: ubuntu-latest
name: Build and Verify
steps:
# Access a cache of set results from a previous run of the job
# This is to prevent re-running steps that were already successful since it is not native to github actions
# Can be used to verify flaky steps with reduced times
- name: Restore the cached run
uses: actions/cache@v2
with:
path: |
job_successful
linter_results
unit_tests_results
integration_tests_results
key: ${{ github.run_id }}-${{ github.job }}-${{ github.sha }}
restore-keys: |
${{ github.run_id }}-${{ github.job }}-${{ github.sha }}
- name: Get if previous job was successful
id: job_successful
run: cat job_successful 2>/dev/null || echo 'false'

- name: Get the previous linter results
id: linter_results
run: cat linter_results 2>/dev/null || echo 'default'

- name: Get the previous unit tests results
id: unit_tests_results
run: cat unit_tests_results 2>/dev/null || echo 'default'

- name: Get the previous integration tests results
id: integration_tests_results
run: cat integration_tests_results 2>/dev/null || echo 'default'

- name: Checkout code
if: steps.job_successful.outputs.job_successful != 'true'
uses: actions/checkout@v2

- name: Setup Node
if: steps.job_successful.outputs.job_successful != 'true'
uses: actions/setup-node@v2
with:
node-version-file: ".nvmrc"
registry-url: 'https://registry.npmjs.org'

- name: Setup Yarn
if: steps.job_successful.outputs.job_successful != 'true'
run: |
npm uninstall -g yarn
npm i -g [email protected]
- name: Run bootstrap
if: steps.job_successful.outputs.job_successful != 'true'
run: yarn osd bootstrap

- name: Run linter
if: steps.linter_results.outputs.linter_results != 'success'
id: linter
run: yarn lint

# Runs unit tests while limiting workers because github actions will spawn more than it can handle and crash
# Continues on error but will create a comment on the pull request if this step failed.
- name: Run unit tests
if: steps.unit_tests_results.outputs.unit_tests_results != 'success'
id: unit-tests
continue-on-error: true
run: node scripts/jest --ci --colors --maxWorkers=10
env:
SKIP_BAD_APPLES: true

- run: echo Unit tests completed unsuccessfully. However, unit tests are inconsistent on the CI so please verify locally with `yarn test:jest`.
if: steps.unit_tests_results.outputs.unit_tests_results != 'success' && steps.unit-tests.outcome != 'success'

# TODO: This gets rejected, we need approval to add this
# - name: Add comment if unit tests did not succeed
# if: steps.unit_tests_results.outputs.unit_tests_results != 'success' && steps.unit-tests.outcome != 'success'
# uses: actions/github-script@v5
# with:
# github-token: ${{ secrets.GITHUB_TOKEN }}
# script: |
# github.rest.issues.createComment({
# issue_number: context.issue.number,
# owner: context.repo.owner,
# repo: context.repo.repo,
# body: 'Unit tests completed unsuccessfully. However, unit tests are inconsistent on the CI so please verify locally with `yarn test:jest`.'
# })
run: yarn test:jest:ci

- name: Run mocha tests
id: mocha-tests
run: yarn test:mocha

- name: Run integration tests
if: steps.integration_tests_results.outputs.integration_tests_results != 'success'
id: integration-tests
run: node scripts/jest_integration --ci --colors --max-old-space-size=5120

# Set cache if linter, unit tests, and integration tests were successful then the job will be marked successful
# Sets individual results to empower re-runs of the same build without re-running successful steps.
- if: |
(steps.linter.outcome == 'success' || steps.linter.outcome == 'skipped') &&
(steps.unit-tests.outcome == 'success' || steps.unit-tests.outcome == 'skipped') &&
(steps.integration-tests.outcome == 'success' || steps.integration-tests.outcome == 'skipped')
run: echo "::set-output name=job_successful::true" > job_successful
- if: steps.linter.outcome == 'success' || steps.linter.outcome == 'skipped'
run: echo "::set-output name=linter_results::success" > linter_results
- if: steps.unit-tests.outcome == 'success' || steps.unit-tests.outcome == 'skipped'
run: echo "::set-output name=unit_tests_results::success" > unit_tests_results
- if: steps.integration-tests.outcome == 'success' || steps.integration-tests.outcome == 'skipped'
run: echo "::set-output name=integration_tests_results::success" > integration_tests_results
run: yarn test:jest_integration:ci

functional-tests:
needs: [ build-lint-test ]
runs-on: ubuntu-latest
@@ -138,76 +69,39 @@ jobs:
steps:
- run: echo Running functional tests for ciGroup${{ matrix.group }}

# Access a cache of set results from a previous run of the job
# This is to prevent re-running a CI group that was already successful since it is not native to github actions
# Can be used to verify flaky steps with reduced times
- name: Restore the cached run
uses: actions/cache@v2
with:
path: |
ftr_tests_results
key: ${{ github.run_id }}-${{ github.job }}-${{ matrix.group }}-${{ github.sha }}
restore-keys: |
${{ github.run_id }}-${{ github.job }}-${{ matrix.group }}-${{ github.sha }}
- name: Get the cached tests results
id: ftr_tests_results
run: cat ftr_tests_results 2>/dev/null || echo 'default'

- name: Checkout code
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
uses: actions/checkout@v2

- name: Setup Node
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
uses: actions/setup-node@v2
with:
node-version-file: ".nvmrc"
registry-url: 'https://registry.npmjs.org'

- name: Setup Yarn
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: |
npm uninstall -g yarn
npm i -g [email protected]
- name: Get cache path
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
id: cache-path
run: echo "::set-output name=CACHE_DIR::$(yarn cache dir)"

- name: Setup cache
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
uses: actions/cache@v2
with:
path: ${{ steps.cache-path.outputs.CACHE_DIR }}
key: ${{ runner.os }}-yarn-${{ env.CACHE_NAME }}-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-${{ env.CACHE_NAME }}-
${{ runner.os }}-yarn-
${{ runner.os }}-
# github virtual env is the latest chrome
- name: Setup chromedriver
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: yarn add --dev [email protected]

- name: Run bootstrap
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: yarn osd bootstrap

- name: Build plugins
if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
run: node scripts/build_opensearch_dashboards_platform_plugins --no-examples --workers 10

- if: steps.ftr_tests_results.outputs.ftr_tests_results != 'success'
- name: Run CI test group ${{ matrix.group }}
id: ftr-tests
run: node scripts/functional_tests.js --config test/functional/config.js --include ciGroup${{ matrix.group }}
env:
CI_GROUP: ciGroup${{ matrix.group }}
CI_PARALLEL_PROCESS_NUMBER: ciGroup${{ matrix.group }}
JOB: ci${{ matrix.group }}
CACHE_DIR: ciGroup${{ matrix.group }}

- if: steps.ftr-tests.outcome == 'success' || steps.ftr-tests.outcome == 'skipped'
run: echo "::set-output name=ftr_tests_results::success" > ftr_tests_results
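
With the result-cache steps removed, the functional-tests job also runs its steps unconditionally. A compressed sketch of the end state, using only names taken from the unchanged context lines above:

    functional-tests:
      needs: [ build-lint-test ]
      runs-on: ubuntu-latest
      steps:
        # checkout, Node and Yarn setup, chromedriver, bootstrap, and the plugin
        # build run as before, just without the `if:` result-cache guards
        - name: Run CI test group ${{ matrix.group }}
          id: ftr-tests
          run: node scripts/functional_tests.js --config test/functional/config.js --include ciGroup${{ matrix.group }}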
2 changes: 2 additions & 0 deletions package.json
@@ -43,7 +43,9 @@
"test": "grunt test",
"test:bwc": "./scripts/bwctest_osd.sh",
"test:jest": "node scripts/jest",
"test:jest:ci": "node scripts/jest --ci --colors --runInBand",
"test:jest_integration": "node scripts/jest_integration",
"test:jest_integration:ci": "node scripts/jest_integration --ci --colors --max-old-space-size=5120",
"test:mocha": "node scripts/mocha",
"test:mocha:coverage": "grunt test:mochaCoverage",
"test:ftr": "node scripts/functional_tests",
16 changes: 7 additions & 9 deletions packages/osd-pm/src/run.test.ts
@@ -37,8 +37,6 @@ import { runCommand } from './run';
import { Project } from './utils/project';
import { log } from './utils/log';

const testif = process.env.SKIP_BAD_APPLES === 'true' ? test.skip : test;

log.setLogLevel('silent');

const rootPath = resolve(`${__dirname}/utils/__fixtures__/opensearch-dashboards`);
@@ -72,14 +70,14 @@ beforeEach(() => {
};
});

testif('passes all found projects to the command if no filter is specified', async () => {
test('passes all found projects to the command if no filter is specified', async () => {
await runCommand(command, config);

expect(command.run).toHaveBeenCalledTimes(1);
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});

testif('excludes project if single `exclude` filter is specified', async () => {
test('excludes project if single `exclude` filter is specified', async () => {
await runCommand(command, {
...config,
options: { exclude: 'foo' },
@@ -89,7 +87,7 @@ testif('excludes project if single `exclude` filter is specified', async () => {
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});

testif('excludes projects if multiple `exclude` filter are specified', async () => {
test('excludes projects if multiple `exclude` filter are specified', async () => {
await runCommand(command, {
...config,
options: { exclude: ['foo', 'bar', 'baz'] },
@@ -99,7 +97,7 @@ testif('excludes projects if multiple `exclude` filter are specified', async ()
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});

testif('includes single project if single `include` filter is specified', async () => {
test('includes single project if single `include` filter is specified', async () => {
await runCommand(command, {
...config,
options: { include: 'foo' },
@@ -109,7 +107,7 @@ testif('includes single project if single `include` filter is specified', async
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});

testif('includes only projects specified in multiple `include` filters', async () => {
test('includes only projects specified in multiple `include` filters', async () => {
await runCommand(command, {
...config,
options: { include: ['foo', 'bar', 'baz'] },
@@ -119,7 +117,7 @@ testif('includes only projects specified in multiple `include` filters', async (
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});

testif('respects both `include` and `exclude` filters if specified at the same time', async () => {
test('respects both `include` and `exclude` filters if specified at the same time', async () => {
await runCommand(command, {
...config,
options: { include: ['foo', 'bar', 'baz'], exclude: 'bar' },
@@ -129,7 +127,7 @@ testif('respects both `include` and `exclude` filters if specified at the same t
expect(getExpectedProjectsAndGraph(command.run)).toMatchSnapshot();
});

testif('does not run command if all projects are filtered out', async () => {
test('does not run command if all projects are filtered out', async () => {
const mockProcessExit = jest.spyOn(process, 'exit').mockReturnValue(undefined as never);

await runCommand(command, {
@@ -34,9 +34,7 @@ import { waitFor, render } from '@testing-library/react';
import { ErrorEmbeddable } from './error_embeddable';
import { EmbeddableRoot } from './embeddable_root';

const testif = process.env.SKIP_BAD_APPLES === 'true' ? test.skip : test;

testif('ErrorEmbeddable renders an embeddable', async () => {
test('ErrorEmbeddable renders an embeddable', async () => {
const embeddable = new ErrorEmbeddable('some error occurred', { id: '123', title: 'Error' });
const { getByTestId, getByText } = render(<EmbeddableRoot embeddable={embeddable} />);

@@ -45,7 +43,7 @@ testif('ErrorEmbeddable renders an embeddable', async () => {
expect(getByText(/some error occurred/i)).toBeVisible();
});

testif('ErrorEmbeddable renders an embeddable with markdown message', async () => {
test('ErrorEmbeddable renders an embeddable with markdown message', async () => {
const error = '[some link](http://localhost:5601/takeMeThere)';
const embeddable = new ErrorEmbeddable(error, { id: '123', title: 'Error' });
const { getByTestId, getByText } = render(<EmbeddableRoot embeddable={embeddable} />);
@@ -34,9 +34,7 @@ import React from 'react';
import { waitFor, render } from '@testing-library/react';
import MarkdownVisComponent from './markdown_vis_controller';

const describeif = process.env.SKIP_BAD_APPLES === 'true' ? describe.skip : describe;

describeif('markdown vis controller', () => {
describe('markdown vis controller', () => {
it('should set html from markdown params', async () => {
const vis = {
params: {
@@ -37,10 +37,9 @@ import { setFormatService } from '../services';
import { dataPluginMock } from '../../../data/public/mocks';
import { setHTMLElementOffset, setSVGElementGetBBox } from '../../../../test_utils/public';

const describeif = process.env.SKIP_BAD_APPLES === 'true' ? describe.skip : describe;
const seedColors = ['#00a69b', '#57c17b', '#6f87d8', '#663db8', '#bc52bc', '#9e3533', '#daa05d'];

describeif('TagCloudVisualizationTest', () => {
describe('TagCloudVisualizationTest', () => {
let domNode;
let visParams;
let SVGElementGetBBoxSpyInstance;
@@ -40,13 +40,11 @@ import { visualizeAppStateStub } from '../stubs';
import { VisualizeConstants } from '../../visualize_constants';
import { createVisualizeServicesMock } from '../mocks';

const describeif = process.env.SKIP_BAD_APPLES === 'true' ? describe.skip : describe;

jest.mock('../utils');
jest.mock('../create_visualize_app_state');
jest.mock('../../../../../data/public');

describeif('useVisualizeAppState', () => {
describe('useVisualizeAppState', () => {
const { visStateToEditorState } = jest.requireMock('../utils');
const { createVisualizeAppState } = jest.requireMock('../create_visualize_app_state');
const { connectToQueryState } = jest.requireMock('../../../../../data/public');