From 8be9ae629c8d58c182a86ef60147f5f7b51752da Mon Sep 17 00:00:00 2001
From: Dzmitry Lemechko
Date: Mon, 12 Dec 2022 16:54:25 +0100
Subject: [PATCH] [performance] unify scripts (#147202)

## Summary

This PR is a follow-up to #147002 and #146129. It makes a few changes so that both performance scripts expose a very similar CLI and allow running `run_performance` locally for debugging purposes.

- To run a single test locally against source:
  - single user journey: `node scripts/run_performance.js --journey-path x-pack/performance/journeys/login.ts`
  - scalability journey (auto-generated): `node scripts/run_scalability.js --journey-path target/scalability_traces/kibana/login-0184f19e-0903-450d-884d-436d737a3abe.json`
- Adds a `skip-warmup` flag to skip the journey warmup run during performance data set extraction (there is no need to run a journey twice when only the APM traces are of interest).

The PR also updates the pipeline scripts with these changes.

---
 .../functional/performance_playwright.sh      |  12 +-
 .../scalability_dataset_extraction.sh         |   2 +-
 .../scripts/steps/scalability/benchmarking.sh |   2 +-
 src/dev/performance/run_performance_cli.ts    | 131 ++++++++++++------
 src/dev/performance/run_scalability_cli.ts    |  69 +++++----
 5 files changed, 130 insertions(+), 86 deletions(-)

diff --git a/.buildkite/scripts/steps/functional/performance_playwright.sh b/.buildkite/scripts/steps/functional/performance_playwright.sh
index 747dd74198102..5c8e86f48506b 100644
--- a/.buildkite/scripts/steps/functional/performance_playwright.sh
+++ b/.buildkite/scripts/steps/functional/performance_playwright.sh
@@ -12,8 +12,16 @@ is_test_execution_step
 rm -rf "$KIBANA_BUILD_LOCATION"
 .buildkite/scripts/download_build_artifacts.sh
 
-echo "--- Running performance tests"
-node scripts/run_performance.js --kibana-install-dir "$KIBANA_BUILD_LOCATION"
+if [ "$BUILDKITE_PIPELINE_SLUG" == "kibana-performance-data-set-extraction" ]; then
+  # 'performance-data-set-extraction' uses 'n2-2-spot' agent, performance metrics don't matter
+  # and we skip warmup phase for each test
+  echo "--- Running single user journeys"
+  node scripts/run_performance.js --kibana-install-dir "$KIBANA_BUILD_LOCATION" --skip-warmup
+else
+  # pipeline should use bare metal static worker
+  echo "--- Running performance tests"
+  node scripts/run_performance.js --kibana-install-dir "$KIBANA_BUILD_LOCATION"
+fi
 
 echo "--- Upload journey step screenshots"
 JOURNEY_SCREENSHOTS_DIR="${KIBANA_DIR}/data/journey_screenshots"
diff --git a/.buildkite/scripts/steps/functional/scalability_dataset_extraction.sh b/.buildkite/scripts/steps/functional/scalability_dataset_extraction.sh
index aff087005ac5c..c6f12e3db6fd5 100755
--- a/.buildkite/scripts/steps/functional/scalability_dataset_extraction.sh
+++ b/.buildkite/scripts/steps/functional/scalability_dataset_extraction.sh
@@ -46,7 +46,7 @@ cd "${OUTPUT_DIR}/.."
 gsutil -m cp -r "${BUILD_ID}" "${GCS_BUCKET}"
 cd -
 
-if [ "$BUILDKITE_PIPELINE_SLUG" == "kibana-single-user-performance" ]; then
+if [ "$BUILDKITE_PIPELINE_SLUG" == "kibana-performance-data-set-extraction" ]; then
   echo "--- Promoting '${BUILD_ID}' dataset to LATEST"
   cd "${OUTPUT_DIR}/.."
   echo "${BUILD_ID}" > latest
diff --git a/.buildkite/scripts/steps/scalability/benchmarking.sh b/.buildkite/scripts/steps/scalability/benchmarking.sh
index e47e0bc10a3c5..57f63445cb26a 100755
--- a/.buildkite/scripts/steps/scalability/benchmarking.sh
+++ b/.buildkite/scripts/steps/scalability/benchmarking.sh
@@ -76,7 +76,7 @@ checkout_and_compile_load_runner
 echo "--- Run Scalability Tests"
 cd "$KIBANA_DIR"
 
-node scripts/run_scalability --kibana-install-dir "$KIBANA_BUILD_LOCATION" --journey-config-path "scalability_traces/server"
+node scripts/run_scalability --kibana-install-dir "$KIBANA_BUILD_LOCATION" --journey-path "scalability_traces/server"
 
 echo "--- Upload test results"
 upload_test_results
diff --git a/src/dev/performance/run_performance_cli.ts b/src/dev/performance/run_performance_cli.ts
index ac0e708dcee0a..142b024df0d4e 100644
--- a/src/dev/performance/run_performance_cli.ts
+++ b/src/dev/performance/run_performance_cli.ts
@@ -6,41 +6,95 @@
  * Side Public License, v 1.
  */
 
+import { createFlagError } from '@kbn/dev-cli-errors';
 import { run } from '@kbn/dev-cli-runner';
 import { REPO_ROOT } from '@kbn/utils';
-import Fsp from 'fs/promises';
+import fs from 'fs';
 import path from 'path';
 
+const JOURNEY_BASE_PATH = 'x-pack/performance/journeys';
+
+export interface Journey {
+  name: string;
+  path: string;
+}
+
 run(
   async ({ log, flagsReader, procRunner }) => {
-    async function runFunctionalTest(journey: string, phase: 'TEST' | 'WARMUP') {
-      // Pass in a clean APM environment, so that FTR can later
-      // set it's own values.
-      const cleanApmEnv = {
-        ELASTIC_APM_TRANSACTION_SAMPLE_RATE: undefined,
-        ELASTIC_APM_SERVER_URL: undefined,
-        ELASTIC_APM_SECRET_TOKEN: undefined,
-        ELASTIC_APM_ACTIVE: undefined,
-        ELASTIC_APM_CONTEXT_PROPAGATION_ONLY: undefined,
-        ELASTIC_APM_GLOBAL_LABELS: undefined,
-      };
+    const skipWarmup = flagsReader.boolean('skip-warmup');
+    const kibanaInstallDir = flagsReader.path('kibana-install-dir');
+    const journeyPath = flagsReader.path('journey-path');
+
+    if (kibanaInstallDir && !fs.existsSync(kibanaInstallDir)) {
+      throw createFlagError('--kibana-install-dir must be an existing directory');
+    }
+
+    if (journeyPath && !fs.existsSync(journeyPath)) {
+      throw createFlagError('--journey-path must be an existing path');
+    }
+
+    let journeys: Journey[] = [];
+
+    if (journeyPath) {
+      journeys = fs.statSync(journeyPath).isDirectory()
+        ? fs.readdirSync(journeyPath).map((fileName) => {
+            return { name: fileName, path: path.resolve(journeyPath, fileName) };
+          })
+        : [{ name: path.parse(journeyPath).name, path: journeyPath }];
+    } else {
+      const journeyBasePath = path.resolve(REPO_ROOT, JOURNEY_BASE_PATH);
+      journeys = fs.readdirSync(journeyBasePath).map((name) => {
+        return { name, path: path.join(journeyBasePath, name) };
+      });
+    }
+
+    log.info(
+      `Found ${journeys.length} journeys to run: ${JSON.stringify(journeys.map((j) => j.name))}`
+    );
+
+    const failedJourneys = [];
+
+    for (const journey of journeys) {
+      try {
+        await startEs();
+        if (!skipWarmup) {
+          await runWarmup(journey, kibanaInstallDir);
+        }
+        await runTest(journey, kibanaInstallDir);
+      } catch (e) {
+        log.error(e);
+        failedJourneys.push(journey.name);
+      } finally {
+        await procRunner.stop('es');
+      }
+    }
+
+    async function runFunctionalTest(
+      configPath: string,
+      phase: 'TEST' | 'WARMUP',
+      kibanaBuildDir: string | undefined
+    ) {
       await procRunner.run('functional-tests', {
         cmd: 'node',
         args: [
           'scripts/functional_tests',
-          ['--config', path.join(journeyBasePath, journey)],
-          ['--kibana-install-dir', kibanaInstallDir],
+          ['--config', configPath],
+          kibanaBuildDir ? ['--kibana-install-dir', kibanaBuildDir] : [],
           '--debug',
           '--bail',
         ].flat(),
         cwd: REPO_ROOT,
         wait: true,
         env: {
+          // Reset all the ELASTIC APM env vars to undefined; the FTR config might set its own values.
+          ...Object.fromEntries(
+            Object.keys(process.env).flatMap((k) =>
+              k.startsWith('ELASTIC_APM_') ? [[k, undefined]] : []
+            )
+          ),
           TEST_PERFORMANCE_PHASE: phase,
           TEST_ES_URL: 'http://elastic:changeme@localhost:9200',
           TEST_ES_DISABLE_STARTUP: 'true',
-          ...cleanApmEnv,
         },
       });
     }
@@ -57,44 +111,24 @@ run(
       log.info(`✅ ES is ready and will run in the background`);
     }
 
-    async function runWarmup(journey: string) {
+    async function runWarmup(journey: Journey, kibanaBuildDir: string | undefined) {
       try {
-        process.stdout.write(`--- Running warmup ${journey}\n`);
+        process.stdout.write(`--- Running warmup: ${journey.name}\n`);
         // Set the phase to WARMUP, this will prevent the functional test server from starting Elasticsearch, opt in to telemetry, etc.
-        await runFunctionalTest(journey, 'WARMUP');
+        await runFunctionalTest(journey.path, 'WARMUP', kibanaBuildDir);
       } catch (e) {
-        log.warning(`Warmup for ${journey} failed`);
+        log.warning(`Warmup for ${journey.name} failed`);
         throw e;
       }
     }
 
-    async function runTest(journey: string) {
-      try {
-        process.stdout.write(`--- Running test ${journey}\n`);
-        await runFunctionalTest(journey, 'TEST');
-      } catch (e) {
-        log.warning(`Journey ${journey} failed. Retrying once...`);
-        await runFunctionalTest(journey, 'TEST');
-      }
-    }
-
-    const journeyBasePath = path.resolve(REPO_ROOT, 'x-pack/performance/journeys/');
-    const kibanaInstallDir = flagsReader.requiredPath('kibana-install-dir');
-    const journeys = await Fsp.readdir(journeyBasePath);
-    log.info(`Found ${journeys.length} journeys to run`);
-
-    const failedJourneys = [];
-
-    for (const journey of journeys) {
+    async function runTest(journey: Journey, kibanaBuildDir: string | undefined) {
       try {
-        await startEs();
-        await runWarmup(journey);
-        await runTest(journey);
+        process.stdout.write(`--- Running ${journey.name}\n`);
+        await runFunctionalTest(journey.path, 'TEST', kibanaBuildDir);
       } catch (e) {
-        log.error(e);
-        failedJourneys.push(journey);
-      } finally {
-        await procRunner.stop('es');
+        log.warning(`Journey ${journey.name} failed. Retrying once...`);
+        await runFunctionalTest(journey.path, 'TEST', kibanaBuildDir);
       }
     }
 
@@ -104,7 +138,14 @@ run(
   },
   {
     flags: {
-      string: ['kibana-install-dir'],
+      string: ['kibana-install-dir', 'journey-path'],
+      boolean: ['skip-warmup'],
+      help: `
+        --kibana-install-dir=dir      Run Kibana from existing install directory instead of from source
+        --journey-path=path           Define path to performance journey or directory with multiple journeys
+                                      that should be executed. '${JOURNEY_BASE_PATH}' is run by default
+        --skip-warmup                 Journey will be executed without warmup (TEST phase only)
+      `,
     },
   }
 );
diff --git a/src/dev/performance/run_scalability_cli.ts b/src/dev/performance/run_scalability_cli.ts
index eec11e611f3e1..5f925772dcc23 100644
--- a/src/dev/performance/run_scalability_cli.ts
+++ b/src/dev/performance/run_scalability_cli.ts
@@ -11,42 +11,48 @@ import { run } from '@kbn/dev-cli-runner';
 import { REPO_ROOT } from '@kbn/utils';
 import fs from 'fs';
 import path from 'path';
+import { Journey } from './run_performance_cli';
 
 run(
   async ({ log, flagsReader, procRunner }) => {
     const kibanaInstallDir = flagsReader.path('kibana-install-dir');
-    const journeyConfigPath = flagsReader.requiredPath('journey-config-path');
+    const journeyPath = flagsReader.requiredPath('journey-path');
 
     if (kibanaInstallDir && !fs.existsSync(kibanaInstallDir)) {
      throw createFlagError('--kibana-install-dir must be an existing directory');
     }
 
     if (
-      !fs.existsSync(journeyConfigPath) ||
-      (!fs.statSync(journeyConfigPath).isDirectory() && path.extname(journeyConfigPath) !== '.json')
+      !fs.existsSync(journeyPath) ||
+      (!fs.statSync(journeyPath).isDirectory() && path.extname(journeyPath) !== '.json')
     ) {
-      throw createFlagError(
-        '--journey-config-path must be an existing directory or scalability json path'
-      );
+      throw createFlagError('--journey-path must be an existing directory or journey path');
     }
 
-    const journeys = fs.statSync(journeyConfigPath).isDirectory()
+    const journeys: Journey[] = fs.statSync(journeyPath).isDirectory()
       ? fs
-          .readdirSync(journeyConfigPath)
+          .readdirSync(journeyPath)
           .filter((fileName) => path.extname(fileName) === '.json')
-          .map((fileName) => path.resolve(journeyConfigPath, fileName))
-      : [journeyConfigPath];
+          .map((fileName) => {
+            return {
+              name: path.parse(fileName).name,
+              path: path.resolve(journeyPath, fileName),
+            };
+          })
+      : [{ name: path.parse(journeyPath).name, path: journeyPath }];
 
-    log.info(`Found ${journeys.length} journeys to run:\n${JSON.stringify(journeys)}`);
+    log.info(
+      `Found ${journeys.length} journeys to run: ${JSON.stringify(journeys.map((j) => j.name))}`
+    );
 
     const failedJourneys = [];
 
     for (const journey of journeys) {
       try {
-        process.stdout.write(`--- Running scalability journey: ${journey}\n`);
-        await runScalabilityJourney(journey, kibanaInstallDir);
+        process.stdout.write(`--- Running scalability journey: ${journey.name}\n`);
+        await runScalabilityJourney(journey.path, kibanaInstallDir);
       } catch (e) {
         log.error(e);
-        failedJourneys.push(journey);
+        failedJourneys.push(journey.name);
       }
     }
@@ -54,29 +60,13 @@ run(
     if (failedJourneys.length > 0) {
       throw new Error(`${failedJourneys.length} journeys failed: ${failedJourneys.join(',')}`);
     }
 
-    async function runScalabilityJourney(filePath: string, kibanaDir?: string) {
-      // Pass in a clean APM environment, so that FTR can later
-      // set it's own values.
-      const cleanApmEnv = {
-        ELASTIC_APM_ACTIVE: undefined,
-        ELASTIC_APM_BREAKDOWN_METRICS: undefined,
-        ELASTIC_APM_CONTEXT_PROPAGATION_ONLY: undefined,
-        ELASTIC_APM_CAPTURE_SPAN_STACK_TRACES: undefined,
-        ELASTIC_APM_ENVIRONMENT: undefined,
-        ELASTIC_APM_GLOBAL_LABELS: undefined,
-        ELASTIC_APM_MAX_QUEUE_SIZE: undefined,
-        ELASTIC_APM_METRICS_INTERVAL: undefined,
-        ELASTIC_APM_SERVER_URL: undefined,
-        ELASTIC_APM_SECRET_TOKEN: undefined,
-        ELASTIC_APM_TRANSACTION_SAMPLE_RATE: undefined,
-      };
-
+    async function runScalabilityJourney(filePath: string, kibanaBuildDir: string | undefined) {
       await procRunner.run('scalability-tests', {
         cmd: 'node',
         args: [
           'scripts/functional_tests',
           ['--config', 'x-pack/test/scalability/config.ts'],
-          kibanaDir ? ['--kibana-install-dir', kibanaDir] : [],
+          kibanaBuildDir ? ['--kibana-install-dir', kibanaBuildDir] : [],
           '--debug',
           '--logToFile',
           '--bail',
         ].flat(),
         cwd: REPO_ROOT,
         wait: true,
         env: {
-          ...cleanApmEnv,
+          // Reset all the ELASTIC APM env vars to undefined; the FTR config might set its own values.
+          ...Object.fromEntries(
+            Object.keys(process.env).flatMap((k) =>
+              k.startsWith('ELASTIC_APM_') ? [[k, undefined]] : []
+            )
+          ),
           SCALABILITY_JOURNEY_PATH: filePath, // journey json file for Gatling test runner
           KIBANA_DIR: REPO_ROOT, // Gatling test runner uses it to find kbn/es archives
         },
       });
     }
   },
   {
     flags: {
-      string: ['kibana-install-dir', 'journey-config-path'],
+      string: ['kibana-install-dir', 'journey-path'],
       help: `
-        --kibana-install-dir Run Kibana from existing install directory instead of from source
-        --journey-config-path Define a scalability journey config or directory with multiple
-                              configs that should be executed
+        --kibana-install-dir=dir      Run Kibana from existing install directory instead of from source
+        --journey-path=path           Define path to scalability journey config or directory with multiple
+                                      configs that should be executed
       `,
     },
   }
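Both CLIs now share the same two building blocks: normalizing a `--journey-path` argument (a single file or a directory) into `{ name, path }` entries, and resetting any inherited `ELASTIC_APM_*` environment variables before spawning the functional test runner. The sketch below restates that logic outside the patch for reference; the helper names `resolveJourneys` and `cleanApmEnv`, and the optional `extension` filter, are illustrative only, since the patch keeps the logic inline in each script.

```ts
import fs from 'fs';
import path from 'path';

// Shape used by both CLIs: a display name plus the path to an FTR journey
// config (performance) or an auto-generated trace json (scalability).
export interface Journey {
  name: string;
  path: string;
}

// Hypothetical helper: accept either a single journey file or a directory of
// journeys and normalize everything to Journey entries, optionally filtering
// directory contents by extension (e.g. '.json' for scalability traces).
export function resolveJourneys(journeyPath: string, extension?: string): Journey[] {
  if (fs.statSync(journeyPath).isDirectory()) {
    return fs
      .readdirSync(journeyPath)
      .filter((fileName) => !extension || path.extname(fileName) === extension)
      .map((fileName) => ({
        name: path.parse(fileName).name,
        path: path.resolve(journeyPath, fileName),
      }));
  }
  return [{ name: path.parse(journeyPath).name, path: journeyPath }];
}

// Same idea as the env block in both scripts: clear any inherited
// ELASTIC_APM_* variables so the FTR config can set its own values.
export function cleanApmEnv(): Record<string, string | undefined> {
  return Object.fromEntries(
    Object.keys(process.env).flatMap((k) =>
      k.startsWith('ELASTIC_APM_') ? [[k, undefined]] : []
    )
  );
}
```

Under those assumptions, the performance CLI would roughly correspond to `resolveJourneys(journeyPath)` over `x-pack/performance/journeys`, and the scalability CLI to `resolveJourneys(journeyPath, '.json')`, with `...cleanApmEnv()` spread into the child process `env`.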