From f7f6dc59fa0cda12c38425dda5f6ca8001f796ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Jane=C5=BE?= Date: Tue, 14 Jul 2020 14:50:22 +0200 Subject: [PATCH] go/oasis-test-runner: Refactor code to refer to scenario(s) consistently go/oasis-test-runner/cmd: Rename --test flag to --scenario (-s for short version) for consistency. go/oasis-test-runner/env: - Rename TestInstanceInfo type to ScenarioInstanceInfo and its Test field to Scenario for consistency. - Rename Env's TestInfo() and WriteTestInfo() methods to ScenarioInfo() and WriteScenarioInfo() for consistency. go/oasis-node/cmd/common/metrics: Use "scenario" metrics label instead of "test" for consistency. --- .buildkite/benchmarks.pipeline.yml | 10 +- .buildkite/code.pipeline.yml | 4 +- .../scripts/daily_benchmark_analysis.sh | 4 +- .buildkite/scripts/daily_txsource.sh | 2 +- .buildkite/scripts/sgx_ias_tests.sh | 2 +- .changelog/3108.breaking.1.md | 3 + .changelog/3108.breaking.2.md | 7 + .changelog/3108.breaking.3.md | 1 + .changelog/3108.internal.md | 1 + docs/oasis-node/metrics.md | 2 +- go/oasis-node/cmd/common/metrics/metrics.go | 8 +- go/oasis-test-runner/README.md | 87 +++-- go/oasis-test-runner/cmd/cmp/cmp.go | 369 +++++++++++++----- go/oasis-test-runner/cmd/common/common.go | 13 +- go/oasis-test-runner/cmd/root.go | 192 +++++---- go/oasis-test-runner/cmd/root_test.go | 8 +- go/oasis-test-runner/env/env.go | 55 +-- go/oasis-test-runner/oasis/args.go | 2 +- go/oasis-test-runner/scenario/e2e/e2e.go | 2 +- .../scenario/e2e/runtime/runtime.go | 2 +- .../scenario/remotesigner/remotesigner.go | 2 +- 21 files changed, 496 insertions(+), 280 deletions(-) create mode 100644 .changelog/3108.breaking.1.md create mode 100644 .changelog/3108.breaking.2.md create mode 100644 .changelog/3108.breaking.3.md create mode 100644 .changelog/3108.internal.md diff --git a/.buildkite/benchmarks.pipeline.yml b/.buildkite/benchmarks.pipeline.yml index a03c35d60b6..0805598e1f5 100644 --- a/.buildkite/benchmarks.pipeline.yml +++ b/.buildkite/benchmarks.pipeline.yml @@ -29,7 +29,7 @@ docker_plugin_default_config: &docker_plugin_default_config - "BUILDKITE_PIPELINE_NAME" - "BUILDKITE_BUILD_NUMBER" - "BUILDKITE_BUILD_URL" - - "TESTS" + - "SCENARIOS" - "NUM_RUNS" - "SLACK_WEBHOOK_URL" - "METRICS_PUSH_ADDR" @@ -123,7 +123,13 @@ steps: command: - .buildkite/scripts/download_e2e_test_artifacts.sh - rm -rf /var/tmp/benchmarks/* - - .buildkite/scripts/test_e2e.sh --metrics.address $METRICS_PUSH_ADDR --metrics.interval 5s --metrics.labels instance=\$BUILDKITE_PIPELINE_NAME-\$BUILDKITE_BUILD_NUMBER --num_runs $NUM_RUNS -t $TESTS + - >- + .buildkite/scripts/test_e2e.sh + --metrics.address $METRICS_PUSH_ADDR + --metrics.interval 5s + --metrics.labels instance=$BUILDKITE_PIPELINE_NAME-$BUILDKITE_BUILD_NUMBER + --num_runs $NUM_RUNS + --scenario $SCENARIOS env: TEST_BASE_DIR: /var/tmp/benchmarks agents: diff --git a/.buildkite/code.pipeline.yml b/.buildkite/code.pipeline.yml index ffe38f14be5..543cab9059f 100644 --- a/.buildkite/code.pipeline.yml +++ b/.buildkite/code.pipeline.yml @@ -244,8 +244,8 @@ steps: timeout_in_minutes: 21 command: - .buildkite/scripts/download_e2e_test_artifacts.sh - # Only run runtime tests as others do not use SGX. - - .buildkite/scripts/test_e2e.sh -t e2e/runtime/.* + # Only run runtime scenarios as others do not use SGX. 
+ - .buildkite/scripts/test_e2e.sh --scenario e2e/runtime/.* artifact_paths: - coverage-merged-e2e-*.txt - /tmp/e2e/**/*.log diff --git a/.buildkite/scripts/daily_benchmark_analysis.sh b/.buildkite/scripts/daily_benchmark_analysis.sh index cdd0b41edfc..6b9fae56130 100755 --- a/.buildkite/scripts/daily_benchmark_analysis.sh +++ b/.buildkite/scripts/daily_benchmark_analysis.sh @@ -12,7 +12,7 @@ # METRICS_SOURCE_GIT_BRANCH - name of feature branch on git (e.g. jsmith/feature/abc) # METRICS_TARGET_GIT_BRANCH - name of master branch on git (e.g. master) # METRICS_THRESHOLDS - max or min thresholds flags (e.g. --max_threshold.cpu.avg_ratio 1.05) -# TESTS - names of test(s) to compare (e.g. e2e/runtime/runtime) +# SCENARIOS - names of scenario(s) to compare (e.g. e2e/runtime/runtime) # SLACK_WEBHOOK_URL - slack webhook for reporting (e.g. https://hooks.slack.com/services/xxxxxx) set -ux @@ -21,7 +21,7 @@ set -ux --metrics.address $METRICS_QUERY_ADDR \ --metrics.source.git_branch $METRICS_SOURCE_GIT_BRANCH \ --metrics.target.git_branch $METRICS_TARGET_GIT_BRANCH \ - -t $TESTS \ + --scenario $SCENARIOS \ --log.level INFO \ --log.format JSON \ $METRICS_THRESHOLDS \ diff --git a/.buildkite/scripts/daily_txsource.sh b/.buildkite/scripts/daily_txsource.sh index 268626dba2c..79989892230 100755 --- a/.buildkite/scripts/daily_txsource.sh +++ b/.buildkite/scripts/daily_txsource.sh @@ -9,7 +9,7 @@ if [[ $BUILDKITE_RETRY_COUNT == 0 ]]; then ./.buildkite/scripts/test_e2e.sh \ --metrics.address $METRICS_PUSH_ADDR \ --metrics.labels instance=$BUILDKITE_PIPELINE_NAME-$BUILDKITE_BUILD_NUMBER \ - --test e2e/runtime/txsource-multi + --scenario e2e/runtime/txsource-multi else curl -H "Content-Type: application/json" \ -X POST \ diff --git a/.buildkite/scripts/sgx_ias_tests.sh b/.buildkite/scripts/sgx_ias_tests.sh index 6765924418a..ef9731d3162 100755 --- a/.buildkite/scripts/sgx_ias_tests.sh +++ b/.buildkite/scripts/sgx_ias_tests.sh @@ -21,4 +21,4 @@ export OASIS_UNSAFE_KM_POLICY_KEYS=1 export OASIS_UNSAFE_ALLOW_DEBUG_ENCLAVES=1 make all -.buildkite/scripts/test_e2e.sh -t e2e/runtime/runtime-encryption +.buildkite/scripts/test_e2e.sh --scenario e2e/runtime/runtime-encryption diff --git a/.changelog/3108.breaking.1.md b/.changelog/3108.breaking.1.md new file mode 100644 index 00000000000..6968585a51c --- /dev/null +++ b/.changelog/3108.breaking.1.md @@ -0,0 +1,3 @@ +go/oasis-test-runner/cmd: Rename `--test` flag to `--scenario` flag + +Rename the short version from `-t` to `-s`. diff --git a/.changelog/3108.breaking.2.md b/.changelog/3108.breaking.2.md new file mode 100644 index 00000000000..208207d99e8 --- /dev/null +++ b/.changelog/3108.breaking.2.md @@ -0,0 +1,7 @@ +go/oasis-test-runner/env: Rename types, fields, functions to refer to scenario + +Rename `TestInstanceInfo` type to `ScenarioInstanceInfo` and its `Test` field +to `Scenario`. + +Rename `Env`'s `TestInfo()` and `WriteTestInfo()` methods to `ScenarioInfo()` +and `WriteScenarioInfo()` for consistency. 
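As a quick illustration of the renames described in this changelog entry, here is a minimal sketch of how calling code reads afterwards. This is a hypothetical helper, not part of this patch; the import paths are assumed to follow the oasis-core module layout.

```go
// Hypothetical illustration only -- not part of this patch. Import paths are
// assumed to follow the oasis-core module layout.
package example

import (
	"github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common/metrics"
	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
)

// scenarioPushLabels shows a caller after the rename: ScenarioInfo() replaces
// TestInfo(), and GetDefaultPushLabels() now takes *env.ScenarioInstanceInfo
// and emits a "scenario" push label instead of "test".
func scenarioPushLabels(childEnv *env.Env) map[string]string {
	info := childEnv.ScenarioInfo() // formerly childEnv.TestInfo()
	return metrics.GetDefaultPushLabels(info)
}
```

The `go/oasis-test-runner/cmd/root.go` and `go/oasis-test-runner/oasis/args.go` hunks later in this patch show the same pattern at the real call sites.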
diff --git a/.changelog/3108.breaking.3.md b/.changelog/3108.breaking.3.md
new file mode 100644
index 00000000000..507570d47bc
--- /dev/null
+++ b/.changelog/3108.breaking.3.md
@@ -0,0 +1 @@
+go/oasis-node/cmd/common/metrics: Rename "test" metrics label to "scenario"
diff --git a/.changelog/3108.internal.md b/.changelog/3108.internal.md
new file mode 100644
index 00000000000..77de57a05c0
--- /dev/null
+++ b/.changelog/3108.internal.md
@@ -0,0 +1 @@
+go/oasis-test-runner: Refactor code to refer to scenario(s) consistently
diff --git a/docs/oasis-node/metrics.md b/docs/oasis-node/metrics.md
index e34374b9a18..e5e6278565c 100644
--- a/docs/oasis-node/metrics.md
+++ b/docs/oasis-node/metrics.md
@@ -71,7 +71,7 @@ oasis_storage_failures | Counter | Number of storage failures. | call | [storage
 oasis_storage_latency | Summary | Storage call latency (seconds). | call | [storage](../../go/storage/metrics.go)
 oasis_storage_successes | Counter | Number of storage successes. | call | [storage](../../go/storage/metrics.go)
 oasis_storage_value_size | Summary | Storage call value size (bytes). | call | [storage](../../go/storage/metrics.go)
-oasis_up | Gauge | Is oasis-test-runner active for specific test. | | [oasis-node/cmd/common/metrics](../../go/oasis-node/cmd/common/metrics/metrics.go)
+oasis_up | Gauge | Is oasis-test-runner active for specific scenario. | | [oasis-node/cmd/common/metrics](../../go/oasis-node/cmd/common/metrics/metrics.go)
 oasis_worker_aborted_batch_count | Counter | Number of aborted batches. | runtime | [worker/compute/executor/committee](../../go/worker/compute/executor/committee/node.go)
 oasis_worker_aborted_merge_count | Counter | Number of aborted merges. | runtime | [worker/compute/merge/committee](../../go/worker/compute/merge/committee/node.go)
 oasis_worker_batch_processing_time | Summary | Time it takes for a batch to finalize (seconds). | runtime | [worker/compute/executor/committee](../../go/worker/compute/executor/committee/node.go)
diff --git a/go/oasis-node/cmd/common/metrics/metrics.go b/go/oasis-node/cmd/common/metrics/metrics.go
index e8fa3a11acd..56855171dcc 100644
--- a/go/oasis-node/cmd/common/metrics/metrics.go
+++ b/go/oasis-node/cmd/common/metrics/metrics.go
@@ -37,7 +37,7 @@ const (
 	MetricsLabelInstance        = "instance"
 	MetricsLabelRun             = "run"
 	MetricsLabelSoftwareVersion = "software_version"
-	MetricsLabelTest            = "test"
+	MetricsLabelScenario        = "scenario"

 	MetricsModeNone = "none"
 	MetricsModePull = "pull"
@@ -51,7 +51,7 @@ var (
 	UpGauge = prometheus.NewGauge(
 		prometheus.GaugeOpts{
 			Name: MetricUp,
-			Help: "Is oasis-test-runner active for specific test.",
+			Help: "Is oasis-test-runner active for specific scenario.",
 		},
 	)
 )
@@ -271,11 +271,11 @@ func EscapeLabelCharacters(l string) string {
 }

 // GetDefaultPushLabels generates standard Prometheus push labels based on test current test instance info.
-func GetDefaultPushLabels(ti *env.TestInstanceInfo) map[string]string { +func GetDefaultPushLabels(ti *env.ScenarioInstanceInfo) map[string]string { labels := map[string]string{ MetricsLabelInstance: ti.Instance, MetricsLabelRun: strconv.Itoa(ti.Run), - MetricsLabelTest: ti.Test, + MetricsLabelScenario: ti.Scenario, MetricsLabelSoftwareVersion: version.SoftwareVersion, } if version.GitBranch != "" { diff --git a/go/oasis-test-runner/README.md b/go/oasis-test-runner/README.md index efe58530ba9..34e897415b1 100644 --- a/go/oasis-test-runner/README.md +++ b/go/oasis-test-runner/README.md @@ -1,79 +1,96 @@ # Oasis Test Runner -`oasis-test-runner` initializes and executes end-to-end and remote signer test -suites. +The Oasis Test Runner initializes and executes different types of tests (e.g. +end-to-end, remote signer) also referred to as scenarios. -To list all supported tests and corresponding parameters, type: +To list all available scenarios and their corresponding parameters, use: ```bash oasis-test-runner list ``` -If no flags provided, all tests are executed. If you want to run specific test, -pass `--test` parameter: +If no flags are provided, all scenarios are executed. + +To run a specific scenario, e.g `e2e/runtime/runtime-dynamic`, pass the +`--scenario` flag (or its short version, `-s`) followed by scenario's name: ```bash -oasis-test-runner --test e2e/runtime/runtime-dynamic +oasis-test-runner --scenario e2e/runtime/runtime-dynamic ``` ## Benchmarking -To benchmark tests, set the `--metrics.address` parameter to the address of the -Prometheus push gateway along with `--metrics.interval`. Additionally, you -can set test-specific parameters and the number of runs each test should be run -with `--num_runs` parameter. +To benchmark scenarios, set the `--metrics.address` flag to the address of the +Prometheus push gateway along with the `--metrics.interval` flag. +Additionally, you can set scenario-specific parameters and the number of runs of +each scenario with the `--num_runs` flag. ## Benchmark analysis with `oasis-test-runner cmp` command -`cmp` command connects to Prometheus server instance containing benchmark -results generated by `oasis-test-runner` and corresponding `oasis-node` -workers. It compares the benchmark results of the last (`source`) and -pre-last (`target`) test batch (also called *instance*). You need to pass the -address of Prometheus query server using `--metrics.address` flag: +The `cmp` sub-command connects to the Prometheus server instance containing +benchmark results generated by Oasis Test Runner and the corresponding Oasis +Node workers. +It compares the benchmark results of the last (`source`) and pre-last (`target`) +scenario batch (also called *instance*). +You need to pass the address of the Prometheus query server using +`--metrics.address` flag, e.g.: ```bash oasis-test-runner cmp \ --metrics.address http://prometheus.myorg.com:9090 ``` -By default `cmp` command will fetch the results of all tests and metrics -supported by `oasis-test-runner`. If you want to compare specific metric(s) and -test(s), provide `--metrics` and `--test` flags respectively: +By default, the `oasis-test-runner cmp` command will fetch the results of all +scenarios and metrics supported by the Oasis Test Runner. 
+If you want to compare specific metric(s) and scenario(s), set the `--metrics` +and `--scenario` flags appropriately, e.g.: ```bash oasis-test-runner cmp \ --metrics time \ - --test e2e/runtime/multiple-runtimes + --scenario e2e/runtime/multiple-runtimes ``` -`cmp` takes *thresholds* for each metric into account. Currently, `avg_ratio` -and `max_ratio` are supported which correspond to the average and maximum ratio -of metric values of all test runs in the benchmark batch. For each ratio, you -can set the `max_threshold` and `min_threshold`. The former requires that the -ratio should not be exceeded and the latter that the provided threshold must be -reached. If not, `oasis-test-runner cmp` will exit with error code. This is -useful for integration into CI pipelines. Thresholds can be using the flag -`--{min|max}_threshold..{avg|max}_ratio`. +The `cmp` sub-command takes *thresholds* for each metric into account. +Currently, `avg_ratio` and `max_ratio` are supported which correspond to the +average and maximum ratio of metric values of all scenario runs in the benchmark +batch. + +For each ratio, you can set the `max_threshold` and `min_threshold`. +The former specifies which ratio should not be exceeded and the latter which +ratio must be reached. +If a value is not within these thresholds, the `oasis-test-runner cmp` command +will exit with an error code. + +This is useful for integration into CI pipelines. +Thresholds can be set using the following flag specification: + +```text +--{min|max}_threshold..{avg|max}_ratio +``` For example: ```bash oasis-test-runner cmp \ --metrics time \ - --test e2e/runtime/multiple-runtimes \ + --scenario e2e/runtime/multiple-runtimes \ --max_threshold.time.avg_ratio 1.1 ``` -will require that the average duration of the test in the last benchmark batch -should be at most 10\% slower than from the pre-last benchmark batch. +will require that the average duration of the scenario in the last benchmark +batch should be at most 10\% slower than from the pre-last benchmark batch. If you are developing or improving a feature in a separate git branch, you will want to perform benchmarks and compare the results of your branch to the ones -from the master branch. `oasis-test-runner` automatically sends information of -the git branch it was compiled on to Prometheus, so all you need to do is -perform benchmarks on both master and feature branches using the same Prometheus -instance. Then, pass the name of source and target branch names to `cmp` and it -will compare the last benchmark batch from each git branch respectively: +from the `master` branch. +The Oasis Test Runner automatically sends information of the git branch it was +compiled on to Prometheus, so all you need to do is perform benchmarks on both, +the `master` and a feature branch, using the same Prometheus instance. + +Afterwards, pass the name of source and target branch names to the `cmp` +sub-command and it will compare the last benchmark batch from each git branch +(respectively): ```bash oasis-test-runner cmp \ diff --git a/go/oasis-test-runner/cmd/cmp/cmp.go b/go/oasis-test-runner/cmd/cmp/cmp.go index 1dc81656ec9..075d53a6c62 100644 --- a/go/oasis-test-runner/cmd/cmp/cmp.go +++ b/go/oasis-test-runner/cmd/cmp/cmp.go @@ -82,33 +82,43 @@ min_threshold..{avg|max}_ratio, ba exits with error code 1.`, cmpLogger *logging.Logger ) -// Metric is a base class for getting a specific prometheus metric and required thresholds to test. 
+// Metric is a base class for getting a specific prometheus metric and required +// thresholds to test. // -// There is one instance of this struct for each test for each metric. +// There is one instance of this struct for each scenario for each metric. type Metric struct { - // getter fetches given coarse time series with finer granularity and returns average and maximum values of all runs - // in the same batch. + // getter fetches given coarse time series with finer granularity and + // returns average and maximum values of all runs in the same batch. getter func(context.Context, string, *model.SampleStream) (float64, float64, error) - // maxThresholdAvgRatio is maximum allowed ratio between the average values of source and target batches. + // maxThresholdAvgRatio is maximum allowed ratio between the average values + // of source and target batches. maxThresholdAvgRatio float64 - // maxThresholdMaxRatio is maximum allowed ratio between the maximum values of source and target batches. + // maxThresholdMaxRatio is maximum allowed ratio between the maximum values + // of source and target batches. maxThresholdMaxRatio float64 - // minThresholdAvgRatio is minimum required ratio between the average values of source and target batches. + // minThresholdAvgRatio is minimum required ratio between the average values + // of source and target batches. minThresholdAvgRatio float64 - // minThresholdMaxRatio is minimum required ratio between the maximum values of source and target batches. + // minThresholdMaxRatio is minimum required ratio between the maximum values + // of source and target batches. minThresholdMaxRatio float64 } -// getDuration returns average and maximum running times of the given coarse benchmark instance ("oasis_up" metric w/ -// minute resolution time series). -func getDuration(ctx context.Context, test string, bi *model.SampleStream) (float64, float64, error) { +// getDuration returns average and maximum running times of the given coarse +// benchmark instance ("oasis_up" metric with minute resolution time series). +func getDuration( + ctx context.Context, + scenario string, + bi *model.SampleStream, +) (float64, float64, error) { instance := string(bi.Metric[metrics.MetricsLabelInstance]) - // Re-fetch the given benchmark instance with second resolution. Each obtained time series corresponds to one run. + // Re-fetch the given benchmark instance with second resolution. Each + // obtained time series corresponds to one run. v1api := prometheusAPI.NewAPI(client) r := prometheusAPI.Range{ Start: bi.Values[0].Timestamp.Time().Add(-1 * time.Minute), @@ -125,9 +135,13 @@ func getDuration(ctx context.Context, test string, bi *model.SampleStream) (floa cmpLogger.Warn("warnings while querying Prometheus", "warnings", warnings) } if len(result.(model.Matrix)) == 0 { - return 0, 0, fmt.Errorf("getDuration: no time series matched test: %s and instance: %s", test, instance) + return 0, 0, fmt.Errorf( + "getDuration: no time series matched scenario: %s and instance: %s", + scenario, instance, + ) } - // Compute average and max duration of runs. Since we have a second-resolution, each point denotes 1 second of run's + // Compute average and max duration of runs. + // Since we have a second-resolution, each point denotes 1 second of run's // uptime. Just count all points and divide them by the number of runs. 
avgDuration := 0.0 maxDuration := 0.0 @@ -142,14 +156,18 @@ func getDuration(ctx context.Context, test string, bi *model.SampleStream) (floa return avgDuration, maxDuration, nil } -// getIOWork returns average and maximum sum of read and written bytes by all workers of the given coarse benchmark -// instance ("oasis_up" metric). -func getIOWork(ctx context.Context, test string, bi *model.SampleStream) (float64, float64, error) { - readAvg, readMax, err := getSummableMetric(ctx, metrics.MetricDiskReadBytes, test, bi) +// getIOWork returns average and maximum sum of read and written bytes by all +// workers of the given coarse benchmark instance ("oasis_up" metric). +func getIOWork( + ctx context.Context, + scenario string, + bi *model.SampleStream, +) (float64, float64, error) { + readAvg, readMax, err := getSummableMetric(ctx, metrics.MetricDiskReadBytes, scenario, bi) if err != nil { return 0, 0, err } - writtenAvg, writtenMax, err := getSummableMetric(ctx, metrics.MetricDiskWrittenBytes, test, bi) + writtenAvg, writtenMax, err := getSummableMetric(ctx, metrics.MetricDiskWrittenBytes, scenario, bi) if err != nil { return 0, 0, err } @@ -157,26 +175,38 @@ func getIOWork(ctx context.Context, test string, bi *model.SampleStream) (float6 return readAvg + writtenAvg, readMax + writtenMax, nil } -// getDiskUsage returns average and maximum sum of disk usage for all workers of the given coarse benchmark instance -// ("oasis_up" metric). -func getDiskUsage(ctx context.Context, test string, bi *model.SampleStream) (float64, float64, error) { - return getSummableMetric(ctx, metrics.MetricDiskUsageBytes, test, bi) +// getDiskUsage returns average and maximum sum of disk usage for all workers of +// the given coarse benchmark instance ("oasis_up" metric). +func getDiskUsage( + ctx context.Context, + scenario string, + bi *model.SampleStream, +) (float64, float64, error) { + return getSummableMetric(ctx, metrics.MetricDiskUsageBytes, scenario, bi) } -// getRssAnonMemory returns average and maximum sum of anonymous resident memory for all workers of the given coarse -// benchmark instance ("oasis_up" metric). -func getRssAnonMemory(ctx context.Context, test string, bi *model.SampleStream) (float64, float64, error) { - return getSummableMetric(ctx, metrics.MetricMemRssAnonBytes, test, bi) +// getRssAnonMemory returns average and maximum sum of anonymous resident memory +// for all workers of the given coarse benchmark instance ("oasis_up" metric). +func getRssAnonMemory( + ctx context.Context, + scenario string, + bi *model.SampleStream, +) (float64, float64, error) { + return getSummableMetric(ctx, metrics.MetricMemRssAnonBytes, scenario, bi) } -// getCPUTime returns average and maximum sum of utime and stime for all workers of the given coarse benchmark instance -// ("oasis_up" metric). -func getCPUTime(ctx context.Context, test string, bi *model.SampleStream) (float64, float64, error) { - utimeAvg, utimeMax, err := getSummableMetric(ctx, metrics.MetricCPUUTimeSeconds, test, bi) +// getCPUTime returns average and maximum sum of utime and stime for all workers +// of the given coarse benchmark instance ("oasis_up" metric). 
+func getCPUTime( + ctx context.Context, + scenario string, + bi *model.SampleStream, +) (float64, float64, error) { + utimeAvg, utimeMax, err := getSummableMetric(ctx, metrics.MetricCPUUTimeSeconds, scenario, bi) if err != nil { return 0, 0, err } - stimeAvg, stimeMax, err := getSummableMetric(ctx, metrics.MetricCPUSTimeSeconds, test, bi) + stimeAvg, stimeMax, err := getSummableMetric(ctx, metrics.MetricCPUSTimeSeconds, scenario, bi) if err != nil { return 0, 0, err } @@ -184,13 +214,18 @@ func getCPUTime(ctx context.Context, test string, bi *model.SampleStream) (float return utimeAvg + stimeAvg, utimeMax + stimeMax, nil } -// getSummableMetric returns average and maximum sum of metrics for all workers of the given coarse benchmark instance -// ("oasis_up" metric). -func getSummableMetric(ctx context.Context, metric, test string, bi *model.SampleStream) (float64, float64, error) { +// getSummableMetric returns average and maximum sum of metrics for all workers +// of the given coarse benchmark instance ("oasis_up" metric). +func getSummableMetric( + ctx context.Context, + metric, scenario string, + bi *model.SampleStream, +) (float64, float64, error) { instance := string(bi.Metric[metrics.MetricsLabelInstance]) labels := bi.Metric.Clone() - // Existing job denotes the "oasis-test-runner" worker only. We want to sum disk space across all workers. + // Existing job denotes the "oasis-test-runner" worker only. We want to sum + // disk space across all workers. delete(labels, "job") // We will average metric over all runs. delete(labels, "run") @@ -199,8 +234,10 @@ func getSummableMetric(ctx context.Context, metric, test string, bi *model.Sampl query := fmt.Sprintf("sum by (run) (%s %s)", metric, labels.String()) - // Fetch value at last recorded time. Some metrics might not be available anymore, if prometheus was shut down. - // Add one additional minute to capture reported values within the last minute period. + // Fetch value at last recorded time. + // Some metrics might not be available anymore, if prometheus was shut down. + // Add one additional minute to capture reported values within the last + // minute period. t := bi.Values[len(bi.Values)-1].Timestamp.Time().Add(time.Minute) result, warnings, err := v1api.Query(ctx, query, t) @@ -211,7 +248,10 @@ func getSummableMetric(ctx context.Context, metric, test string, bi *model.Sampl cmpLogger.Warn("warnings while querying Prometheus", "warnings", warnings) } if len(result.(model.Vector)) == 0 { - return 0, 0, fmt.Errorf("getSummableMetric: no time series matched test: %s and instance: %s", test, instance) + return 0, 0, fmt.Errorf( + "getSummableMetric: no time series matched scenario: %s and instance: %s", + scenario, instance, + ) } // Compute average and max values. @@ -228,9 +268,13 @@ func getSummableMetric(ctx context.Context, metric, test string, bi *model.Sampl return avg, max, nil } -// getNetwork returns average and maximum amount of network activity for all workers of the given coarse benchmark -// instance ("oasis_up" metric). -func getNetwork(ctx context.Context, test string, bi *model.SampleStream) (float64, float64, error) { +// getNetwork returns average and maximum amount of network activity for all +// workers of the given coarse benchmark instance ("oasis_up" metric). 
+func getNetwork( + ctx context.Context, + scenario string, + bi *model.SampleStream, +) (float64, float64, error) { instance := string(bi.Metric[metrics.MetricsLabelInstance]) labels := bi.Metric.Clone() @@ -261,7 +305,10 @@ func getNetwork(ctx context.Context, test string, bi *model.SampleStream) (float cmpLogger.Warn("warnings while querying Prometheus", "warnings", warnings) } if len(result.(model.Matrix)) == 0 { - return 0, 0, fmt.Errorf("getNetworkMetric: no time series matched test: %s and instance: %s", test, instance) + return 0, 0, fmt.Errorf( + "getNetworkMetric: no time series matched scenario: %s and instance: %s", + scenario, instance, + ) } // Compute average and max values. @@ -285,31 +332,41 @@ func getNetwork(ctx context.Context, test string, bi *model.SampleStream) (float nil } -// getCoarseBenchmarkInstances finds time series based on "oasis_up" metric w/ minute resolution for the given test and -// labels ordered from the oldest to the most recent ones. +// getCoarseBenchmarkInstances finds time series based on "oasis_up" metric with +// minute resolution for the given scenario and labels ordered from the oldest +// to the most recent ones. // -// This function is called initially to determine benchmark instances to compare. Then, the metric-specific operation -// fetches time series with finer (second) granularity. +// This function is called initially to determine benchmark instances to +// compare. Afterwards, the metric-specific operation fetches time series with +// finer (second) granularity. // -// NB: Due to Prometheus limit, this function fetches time series in the past 183 hours only. -func getCoarseBenchmarkInstances(ctx context.Context, test string, labels map[string]string) (model.Matrix, error) { +// NOTE: Due to Prometheus limit, this function fetches time series in the past +// 183 hours only. +func getCoarseBenchmarkInstances( + ctx context.Context, + scenario string, + labels map[string]string, +) (model.Matrix, error) { v1api := prometheusAPI.NewAPI(client) r := prometheusAPI.Range{ - // XXX: Hardcoded max resolution in Prometheus is 11,000 points or ~183 hours with minute resolution. + // XXX: Hardcoded max resolution in Prometheus is 11,000 points or ~183 + // hours with minute resolution. Start: time.Now().Add(-183 * time.Hour), End: time.Now(), Step: time.Minute, } ls := model.LabelSet{ - "job": metrics.MetricsJobTestRunner, - metrics.MetricsLabelTest: model.LabelValue(test), + "job": metrics.MetricsJobTestRunner, + metrics.MetricsLabelScenario: model.LabelValue(scenario), } for k, v := range labels { ls[model.LabelName(k)] = model.LabelValue(v) } - query := fmt.Sprintf("max(%s %s) by (%s) == 1.0", metrics.MetricUp, ls.String(), metrics.MetricsLabelInstance) + query := fmt.Sprintf("max(%s %s) by (%s) == 1.0", + metrics.MetricUp, ls.String(), metrics.MetricsLabelInstance, + ) result, warnings, err := v1api.QueryRange(ctx, query, r) if err != nil { cmpLogger.Error("error querying Prometheus", "err", err) @@ -340,46 +397,103 @@ func instanceName(s *model.SampleStream) string { return string(s.Metric[metrics.MetricsLabelInstance]) } -// fetchAndCompare fetches the given metric from prometheus and compares the results. +// fetchAndCompare fetches the given metric from prometheus and compares the +// results. // -// Returns false, if metric-specific ratios are exceeded or there is a problem obtaining time series. Otherwise true. 
-func fetchAndCompare(ctx context.Context, m, test string, sInstance, tInstance *model.SampleStream) (succ bool) { +// If metric-specific ratios are exceeded or if there is a problem obtaining +// time series, returns false. Otherwise, returns true. +func fetchAndCompare( + ctx context.Context, + m, scenario string, + sInstance, tInstance *model.SampleStream, +) (succ bool) { getMetric := allMetrics[m].getter succ = true - sAvg, sMax, err := getMetric(ctx, test, sInstance) + sAvg, sMax, err := getMetric(ctx, scenario, sInstance) if err != nil { - cmpLogger.Error("error fetching source benchmark instance", "metric", m, "test", test, "instance", instanceName(sInstance), "err", err) + cmpLogger.Error("error fetching source benchmark instance", + "metric", m, + "scenario", scenario, + "instance", instanceName(sInstance), + "err", err, + ) return false } - tAvg, tMax, err := getMetric(ctx, test, tInstance) + tAvg, tMax, err := getMetric(ctx, scenario, tInstance) if err != nil { - cmpLogger.Error("error fetching target test instance", "metric", m, "test", test, "instance", instanceName(sInstance), "err", err) + cmpLogger.Error("error fetching target scenario instance", + "metric", m, + "scenario", scenario, + "instance", instanceName(sInstance), + "err", err, + ) return false } - // Compare average and max metric values and log error, if they exceed or don't reach required ratios. + // Compare average and max metric values and log error(s) if they exceed or + // don't reach required ratios. maxAvgRatio := allMetrics[m].maxThresholdAvgRatio maxMaxRatio := allMetrics[m].maxThresholdMaxRatio minAvgRatio := allMetrics[m].minThresholdAvgRatio minMaxRatio := allMetrics[m].minThresholdMaxRatio - cmpLogger.Info("obtained average ratio", "metric", m, "test", test, "source_avg", sAvg, "target_avg", tAvg, "ratio", sAvg/tAvg) + cmpLogger.Info("obtained average ratio", + "metric", m, + "scenario", scenario, + "source_avg", sAvg, + "target_avg", tAvg, + "ratio", sAvg/tAvg, + ) if maxAvgRatio != 0 && sAvg/tAvg > maxAvgRatio { - cmpLogger.Error("average metric value exceeds max allowed ratio", "metric", m, "test", test, "source_avg", sAvg, "target_avg", tAvg, "ratio", sAvg/tAvg, "max_allowed_avg_ratio", maxAvgRatio) + cmpLogger.Error("average metric value exceeds max allowed ratio", + "metric", m, + "scenario", scenario, + "source_avg", sAvg, + "target_avg", tAvg, + "ratio", sAvg/tAvg, + "max_allowed_avg_ratio", maxAvgRatio, + ) succ = false } if minAvgRatio != 0 && sAvg/tAvg < minAvgRatio { - cmpLogger.Error("average metric value doesn't reach min required ratio", "metric", m, "test", test, "source_avg", sAvg, "target_avg", tAvg, "ratio", sAvg/tAvg, "min_required_avg_ratio", minAvgRatio) + cmpLogger.Error("average metric value doesn't reach min required ratio", + "metric", m, + "scenario", scenario, + "source_avg", sAvg, + "target_avg", tAvg, + "ratio", sAvg/tAvg, + "min_required_avg_ratio", minAvgRatio, + ) succ = false } - cmpLogger.Info("obtained max ratio", "metric", m, "test", test, "source_max", sMax, "target_max", tMax, "ratio", sMax/tMax) + cmpLogger.Info("obtained max ratio", + "metric", m, + "scenario", scenario, + "source_max", sMax, + "target_max", tMax, + "ratio", sMax/tMax, + ) if maxMaxRatio != 0 && sMax/tMax > maxMaxRatio { - cmpLogger.Error("maximum metric value exceeds max ratio", "metric", m, "test", test, "source_max", sMax, "target_max", tMax, "ratio", sMax/tMax, "max_allowed_max_ratio", maxMaxRatio) + cmpLogger.Error("maximum metric value exceeds max ratio", + "metric", m, + "scenario", 
scenario, + "source_max", sMax, + "target_max", tMax, + "ratio", sMax/tMax, + "max_allowed_max_ratio", maxMaxRatio, + ) succ = false } if minMaxRatio != 0 && sMax/tMax < maxMaxRatio { - cmpLogger.Error("maximum metric value doesn't reach min required ratio", "metric", m, "test", test, "source_max", sMax, "target_max", tMax, "ratio", sMax/tMax, "min_required_max_ratio", maxMaxRatio) + cmpLogger.Error("maximum metric value doesn't reach min required ratio", + "metric", m, + "scenario", scenario, + "source_max", sMax, + "target_max", tMax, + "ratio", sMax/tMax, + "min_required_max_ratio", maxMaxRatio, + ) succ = false } @@ -422,70 +536,87 @@ func runCmp(cmd *cobra.Command, args []string) { } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - tests := viper.GetStringSlice(common.CfgTest) - if len(tests) == 0 { + scenarios := viper.GetStringSlice(common.CfgScenarioRegex) + if len(scenarios) == 0 { for _, s := range common.GetDefaultScenarios() { - tests = append(tests, s.Name()) + scenarios = append(scenarios, s.Name()) } } succ := true - for _, test := range tests { - sLabels, tLabels := map[string]string{}, map[string]string{} + for _, sc := range scenarios { + srcLabels, tgtLabels := map[string]string{}, map[string]string{} if viper.IsSet(cfgMetricsSourceGitBranch) { - sLabels[metrics.MetricsLabelGitBranch] = viper.GetString(cfgMetricsSourceGitBranch) + srcLabels[metrics.MetricsLabelGitBranch] = viper.GetString(cfgMetricsSourceGitBranch) } if viper.IsSet(cfgMetricsTargetGitBranch) { - tLabels[metrics.MetricsLabelGitBranch] = viper.GetString(cfgMetricsTargetGitBranch) + tgtLabels[metrics.MetricsLabelGitBranch] = viper.GetString(cfgMetricsTargetGitBranch) } // Set other required Prometheus labels, if passed. - // TODO: Integrate test parameters and parameter set combinations if multiple values provided like we do in oasis-test-runner. + // TODO: Integrate scenario parameters and parameter set combinations if + // multiple values are provided like we do in oasis-test-runner. 
for k, v := range viper.GetStringMapString(metrics.CfgMetricsLabels) { - sLabels[k] = v - tLabels[k] = v + srcLabels[k] = v + tgtLabels[k] = v } - sInstances, err := getCoarseBenchmarkInstances(ctx, test, sLabels) + srcScInstances, err := getCoarseBenchmarkInstances(ctx, sc, srcLabels) if err != nil { - cmpLogger.Error("error querying for source test instances", "err", err) + cmpLogger.Error("error querying for source scenario instances", "err", err) os.Exit(1) } - sNames := instanceNames(sInstances) - tInstances, err := getCoarseBenchmarkInstances(ctx, test, tLabels) + srcScNames := instanceNames(srcScInstances) + tgtScInstances, err := getCoarseBenchmarkInstances(ctx, sc, tgtLabels) if err != nil { - cmpLogger.Error("error querying for target test instances", "err", err) + cmpLogger.Error("error querying for target scenario instances", "err", err) os.Exit(1) } - tNames := instanceNames(tInstances) + tgtScNames := instanceNames(tgtScInstances) - if len(sNames) == 0 { - cmpLogger.Info("test does not have any source benchmark instances to compare, ignoring", "test", test) + if len(srcScNames) == 0 { + cmpLogger.Info( + "scenario does not have any source benchmark instances to compare, ignoring", + "scenario", sc, + ) continue } - if len(tNames) == 0 { - cmpLogger.Info("test does not have any target benchmark instances to compare, ignoring", "test", test) + if len(tgtScNames) == 0 { + cmpLogger.Info( + "scenario does not have any target benchmark instances to compare, ignoring", + "scenario", sc, + ) continue } - var sInstance, tInstance *model.SampleStream - if sNames[len(sNames)-1] != tNames[len(tNames)-1] { + var srcInstance, tgtInstance *model.SampleStream + if srcScNames[len(srcScNames)-1] != tgtScNames[len(tgtScNames)-1] { // Benchmark instances differ e.g. because of different gitBranch. - sInstance = sInstances[len(sInstances)-1] - tInstance = tInstances[len(tInstances)-1] + srcInstance = srcScInstances[len(srcScInstances)-1] + tgtInstance = tgtScInstances[len(tgtScInstances)-1] } else { - // Last benchmark instances are equal, pick the pre-last one from the target instances. - if len(tNames) < 2 { - cmpLogger.Info("test has only one benchmark instance, ignoring", "test", test, "source_instances", sNames, "target_instances", tNames) + // Last benchmark instances are equal, pick the pre-last one from + // the target instances. + if len(tgtScNames) < 2 { + cmpLogger.Info("scenario only has one benchmark instance, ignoring", + "scenario", sc, + "source_instances", srcScNames, + "target_instances", tgtScNames, + ) continue } - sInstance = sInstances[len(sInstances)-1] - tInstance = tInstances[len(tInstances)-2] + srcInstance = srcScInstances[len(srcScInstances)-1] + tgtInstance = tgtScInstances[len(tgtScInstances)-2] } - cmpLogger.Info("obtained source and target instance", "test", test, "source_instance", instanceName(sInstance), "target_instance", instanceName(tInstance)) + cmpLogger.Info("obtained source and target instance", + "scenario", sc, + "source_instance", instanceName(srcInstance), + "target_instance", instanceName(tgtInstance), + ) for _, m := range userMetrics { - // Don't put succ = succ && f oneliner here, because f won't get executed once succ = false. - fSucc := fetchAndCompare(ctx, m, test, sInstance, tInstance) + // Don't put succ = succ && f oneliner here, because f won't get + // executed once succ = false. 
+ fSucc := fetchAndCompare(ctx, m, sc, srcInstance, tgtInstance) succ = succ && fSucc } } @@ -504,15 +635,43 @@ func Register(parentCmd *cobra.Command) { var metricNames []string for k := range allMetrics { metricNames = append(metricNames, k) - cmpFlags.Float64Var(&allMetrics[k].maxThresholdAvgRatio, fmt.Sprintf("max_threshold.%s.avg_ratio", k), allMetrics[k].maxThresholdAvgRatio, fmt.Sprintf("maximum allowed ratio between average %s metrics", k)) - cmpFlags.Float64Var(&allMetrics[k].maxThresholdMaxRatio, fmt.Sprintf("max_threshold.%s.max_ratio", k), allMetrics[k].maxThresholdMaxRatio, fmt.Sprintf("maximum allowed ratio between maximum %s metrics", k)) - cmpFlags.Float64Var(&allMetrics[k].minThresholdAvgRatio, fmt.Sprintf("min_threshold.%s.avg_ratio", k), allMetrics[k].minThresholdAvgRatio, fmt.Sprintf("minimum required ratio between average %s metrics", k)) - cmpFlags.Float64Var(&allMetrics[k].minThresholdMaxRatio, fmt.Sprintf("min_threshold.%s.max_ratio", k), allMetrics[k].minThresholdMaxRatio, fmt.Sprintf("minimum required ratio between maximum %s metrics", k)) + cmpFlags.Float64Var( + &allMetrics[k].maxThresholdAvgRatio, + fmt.Sprintf("max_threshold.%s.avg_ratio", k), + allMetrics[k].maxThresholdAvgRatio, + fmt.Sprintf("maximum allowed ratio between average %s metrics", k), + ) + cmpFlags.Float64Var( + &allMetrics[k].maxThresholdMaxRatio, + fmt.Sprintf("max_threshold.%s.max_ratio", k), + allMetrics[k].maxThresholdMaxRatio, + fmt.Sprintf("maximum allowed ratio between maximum %s metrics", k), + ) + cmpFlags.Float64Var( + &allMetrics[k].minThresholdAvgRatio, + fmt.Sprintf("min_threshold.%s.avg_ratio", k), + allMetrics[k].minThresholdAvgRatio, + fmt.Sprintf("minimum required ratio between average %s metrics", k), + ) + cmpFlags.Float64Var( + &allMetrics[k].minThresholdMaxRatio, + fmt.Sprintf("min_threshold.%s.max_ratio", k), + allMetrics[k].minThresholdMaxRatio, + fmt.Sprintf("minimum required ratio between maximum %s metrics", k), + ) } cmpFlags.StringSliceVarP(&userMetrics, cfgMetrics, cfgMetricsP, metricNames, "metrics to compare") - cmpFlags.String(cfgMetricsSourceGitBranch, "", "(optional) git_branch label for the source benchmark instance") - cmpFlags.String(cfgMetricsTargetGitBranch, "", "(optional) git_branch label for the target benchmark instance") + cmpFlags.String( + cfgMetricsSourceGitBranch, + "", + "(optional) git_branch label for the source benchmark instance", + ) + cmpFlags.String( + cfgMetricsTargetGitBranch, + "", + "(optional) git_branch label for the target benchmark instance", + ) cmpFlags.String(cfgMetricsNetDevice, "lo", "network device traffic to compare") _ = viper.BindPFlags(cmpFlags) diff --git a/go/oasis-test-runner/cmd/common/common.go b/go/oasis-test-runner/cmd/common/common.go index b7acb750877..27125a5907d 100644 --- a/go/oasis-test-runner/cmd/common/common.go +++ b/go/oasis-test-runner/cmd/common/common.go @@ -10,15 +10,15 @@ import ( ) const ( - CfgLogFmt = "log.format" - CfgLogLevel = "log.level" - CfgTest = "test" - CfgTestP = "t" + CfgLogFmt = "log.format" + CfgLogLevel = "log.level" + CfgScenarioRegex = "scenario" + CfgScenarioRegexShort = "s" // ScenarioParamsMask is the form of parameters passed to specific scenario. // - // [1] parameter is test name, [2] parameter is parameter name. This is - // used when binding and parsing (recursive) parameters with viper. + // [1] parameter is scenario name, [2] parameter is a parameter name. This + // is used when binding and parsing (recursive) parameters with viper. 
ScenarioParamsMask = "%[1]s.%[2]s" ) @@ -35,6 +35,7 @@ func GetScenarios() map[string]scenario.Scenario { } // GetScenarioNames returns the names of all scenarios. +// NOTE: Scenarios are sorted alphabetically. func GetScenarioNames() (names []string) { for name := range scenarios { names = append(names, name) diff --git a/go/oasis-test-runner/cmd/root.go b/go/oasis-test-runner/cmd/root.go index bf084e566bf..fc6d8a59173 100644 --- a/go/oasis-test-runner/cmd/root.go +++ b/go/oasis-test-runner/cmd/root.go @@ -49,7 +49,7 @@ var ( listCmd = &cobra.Command{ Use: "list", - Short: "List registered test cases", + Short: "List registered scenarios", Run: runList, } @@ -87,15 +87,17 @@ func RegisterNondefault(s scenario.Scenario) error { return fmt.Errorf("RegisterNondefault: error registering nondefault scenario: %w", err) } - RegisterTestParams(strings.ToLower(s.Name()), s.Parameters()) + RegisterScenarioParams(strings.ToLower(s.Name()), s.Parameters()) return nil } -// RegisterTestParams registers given scenario parameters as string slices regardless of actual type. +// RegisterScenarioParams registers parameters for a given scenario as string +// slices regardless of actual type. // -// Later we combine specific parameter sets and execute tests with all parameter combinations. -func RegisterTestParams(name string, p *env.ParameterFlagSet) { +// Later we combine specific parameter sets and execute scenarios with all +// parameter combinations. +func RegisterScenarioParams(name string, p *env.ParameterFlagSet) { fs := flag.NewFlagSet(name, flag.ContinueOnError) p.VisitAll(func(f *flag.Flag) { fs.StringSlice(name+"."+f.Name, []string{f.Value.String()}, f.Usage) @@ -104,20 +106,28 @@ func RegisterTestParams(name string, p *env.ParameterFlagSet) { _ = viper.BindPFlags(fs) } -// parseTestParams parses --.=,... flags combinations, clones provided proto-scenarios, and -// populates them so that each scenario instance has unique parameter set. Returns mapping test name -> list of scenario -// instances. NB: Golang maps are unordered so ordering of tests is *not preserved*. -func parseTestParams(toRun []scenario.Scenario) (map[string][]scenario.Scenario, error) { - r := make(map[string][]scenario.Scenario) - for _, s := range toRun { +// parseScenarioParams parses --.=,... flags +// combinations, clones provided proto-scenarios, and populates them so that +// each scenario instance has a unique parameter set. +// Returns a mapping: scenario name -> list of scenario instances. +// NOTE: Golang maps are unordered so ordering of scenarios is not preserved. +func parseScenarioParams(toRun []scenario.Scenario) (map[string][]scenario.Scenario, error) { + scListsToRun := make(map[string][]scenario.Scenario) + for _, sc := range toRun { zippedParams := make(map[string][]string) - s.Parameters().VisitAll(func(f *flag.Flag) { - // If no parameter values for test provided via CLI, use default registered values as fallback. - zippedParams[f.Name] = viper.GetStringSlice(fmt.Sprintf(common.ScenarioParamsMask, s.Name(), f.Name)) - - // Otherwise, apply parameter values from most specific to most general. - for _, p := range recurseScenarioName(s.Name()) { - paramName := fmt.Sprintf(common.ScenarioParamsMask, p, f.Name) + sc.Parameters().VisitAll(func(f *flag.Flag) { + // Default to parameter values that were registered as defaults for + // this particular scenario and parameter combination. 
+ zippedParams[f.Name] = viper.GetStringSlice( + fmt.Sprintf(common.ScenarioParamsMask, sc.Name(), f.Name), + ) + + // Use parameter values that very explicitly set for a given + // (generalized) scenario and parameter combination. + // NOTE: Parameter values for more specific (generalized) scenario + // have preference over more generalized ones. + for _, genName := range generalizedScenarioName(sc.Name()) { + paramName := fmt.Sprintf(common.ScenarioParamsMask, genName, f.Name) if viper.IsSet(paramName) { zippedParams[f.Name] = viper.GetStringSlice(paramName) break @@ -127,33 +137,36 @@ func parseTestParams(toRun []scenario.Scenario) (map[string][]scenario.Scenario, parameterSets := computeParamSets(zippedParams, map[string]string{}) - // For each parameter set combination, clone a scenario and apply user-provided parameter value. - for _, ps := range parameterSets { - sCloned := s.Clone() - for k, userVal := range ps { - if err := sCloned.Parameters().Set(k, userVal); err != nil { - return nil, fmt.Errorf("parseTestParams: error setting viper parameter: %w", err) + // For each parameter set combination, clone a scenario and apply the + // provided parameter values. + for _, paramSet := range parameterSets { + sCloned := sc.Clone() + for param, val := range paramSet { + if err := sCloned.Parameters().Set(param, val); err != nil { + return nil, fmt.Errorf("parseScenarioParams: error setting viper parameter: %w", err) } } - r[s.Name()] = append(r[s.Name()], sCloned) + scListsToRun[sc.Name()] = append(scListsToRun[sc.Name()], sCloned) } - // Test has no parameters (incl. recursive ones) defined at all, keep original copy. + // Scenario has no parameters (incl. generalized ones) defined, keep + // original scenario. if len(parameterSets) == 0 { - r[s.Name()] = []scenario.Scenario{s} + scListsToRun[sc.Name()] = []scenario.Scenario{sc} } } - return r, nil + return scListsToRun, nil } -// recurseScenarioNames returns list of generalized scenario names from original name to most general. -func recurseScenarioName(name string) []string { +// generalizedScenarioNames returns list of generalized scenario names from the +// original name to most general name. +func generalizedScenarioName(name string) []string { dirs := strings.Split(name, "/") if len(dirs) == 1 { return []string{name} } - subNames := recurseScenarioName(strings.Join(dirs[0:len(dirs)-1], "/")) + subNames := generalizedScenarioName(strings.Join(dirs[0:len(dirs)-1], "/")) return append([]string{name}, subNames...) } @@ -203,7 +216,7 @@ func Register(s scenario.Scenario) error { return fmt.Errorf("Register: error registering nondefault scenario: %w", err) } - RegisterTestParams(strings.ToLower(s.Name()), s.Parameters()) + RegisterScenarioParams(strings.ToLower(s.Name()), s.Parameters()) return nil } @@ -271,7 +284,7 @@ func runRoot(cmd *cobra.Command, args []string) error { // Enumerate requested scenarios. toRun := common.GetDefaultScenarios() // Run all default scenarios if not set. - if scNameRegexes := viper.GetStringSlice(common.CfgTest); len(scNameRegexes) > 0 { + if scNameRegexes := viper.GetStringSlice(common.CfgScenarioRegex); len(scNameRegexes) > 0 { matched := make(map[scenario.Scenario]bool) for _, scNameRegex := range scNameRegexes { // Make sure the given scenario name regex matches the whole scenario name, not just @@ -326,23 +339,25 @@ func runRoot(cmd *cobra.Command, args []string) error { ) } - // Parse test parameters passed by CLI. + // Expand the list of scenarios to run with the passed scenario parameters. 
var toRunExploded map[string][]scenario.Scenario - toRunExploded, err = parseTestParams(toRun) + toRunExploded, err = parseScenarioParams(toRun) if err != nil { - return fmt.Errorf("root: failed to parse test params: %w", err) + return fmt.Errorf("root: failed to parse scenario parameters: %w", err) } - // Run all test instances. + // Run all requested scenarios. index := 0 for run := 0; run < numRuns; run++ { - // Walk through toRun instead of toRunExploded to preserve tests ordering. + // Iterate through toRun instead of toRunExploded to preserve scenario + // ordering. for _, sc := range toRun { name := sc.Name() scs := toRunExploded[name] for i, v := range scs { - // If number of runs is greater than 1 or there are multiple parameter sets for test, maintain unique - // scenario datadir by appending unique run ID. + // If number of runs is greater than 1 or if there are multiple + // parameter sets for a scenario, maintain unique scenario + // datadir by appending unique run ID. n := name runID := run*len(scs) + i if numRuns > 1 || len(scs) > 1 { @@ -350,49 +365,47 @@ func runRoot(cmd *cobra.Command, args []string) error { } if index%parallelJobCount != parallelJobIndex { - logger.Info("skipping test case (assigned to different parallel job)", - "test", name, "run_id", runID, + logger.Info("skipping scenario (assigned to different parallel job)", + "scenario", name, "run_id", runID, ) index++ continue } if excludeMap[strings.ToLower(v.Name())] { - logger.Info("skipping test case (excluded by environment)", - "test", name, "run_id", runID, + logger.Info("skipping scenario (excluded by environment)", + "scenario", name, "run_id", runID, ) index++ continue } - logger.Info("running test case", - "test", name, "run_id", runID, + logger.Info("running scenario", + "scenario", name, "run_id", runID, ) - childEnv, err := rootEnv.NewChild(n, &env.TestInstanceInfo{ - Test: v.Name(), + childEnv, err := rootEnv.NewChild(n, &env.ScenarioInstanceInfo{ + Scenario: v.Name(), Instance: filepath.Base(rootEnv.Dir()), ParameterSet: v.Parameters(), Run: run, }) if err != nil { logger.Error("failed to setup child environment", - "err", err, - "test", name, - "run_id", runID, + "err", err, "scenario", name, "run_id", runID, ) return fmt.Errorf("root: failed to setup child environment: %w", err) } // Dump current parameter set to file. - if err = childEnv.WriteTestInfo(); err != nil { + if err = childEnv.WriteScenarioInfo(); err != nil { return err } // Init per-run prometheus pusher, if metrics are enabled. 
if viper.IsSet(metrics.CfgMetricsAddr) { pusher = push.New(viper.GetString(metrics.CfgMetricsAddr), metrics.MetricsJobTestRunner) - labels := metrics.GetDefaultPushLabels(childEnv.TestInfo()) + labels := metrics.GetDefaultPushLabels(childEnv.ScenarioInfo()) for k, v := range labels { pusher = pusher.Grouping(k, v) } @@ -400,22 +413,22 @@ func runRoot(cmd *cobra.Command, args []string) error { } if err = doScenario(childEnv, v); err != nil { - logger.Error("failed to run test case", + logger.Error("failed to run scenario", "err", err, - "test", name, + "scenario", name, "run_id", runID, ) - err = fmt.Errorf("root: failed to run test case: %w", err) + err = fmt.Errorf("root: failed to run scenario: %w", err) } if cleanErr := doCleanup(childEnv); cleanErr != nil { - logger.Error("failed to clean up child envionment", + logger.Error("failed to clean up child environment", "err", cleanErr, - "test", name, + "scenario", name, "run_id", runID, ) if err == nil { - err = fmt.Errorf("root: failed to clean up child enviroment: %w", cleanErr) + err = fmt.Errorf("root: failed to clean up child environment: %w", cleanErr) } } @@ -423,8 +436,8 @@ func runRoot(cmd *cobra.Command, args []string) error { return err } - logger.Info("passed test case", - "test", name, "run_id", runID, + logger.Info("passed scenario", + "scenario", name, "run_id", runID, ) index++ @@ -438,12 +451,12 @@ func runRoot(cmd *cobra.Command, args []string) error { func doScenario(childEnv *env.Env, sc scenario.Scenario) (err error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("root: panic caught running test case: %v: %s", r, debug.Stack()) + err = fmt.Errorf("root: panic caught running scenario: %v: %s", r, debug.Stack()) } }() if err = sc.PreInit(childEnv); err != nil { - err = fmt.Errorf("root: failed to pre-initialize test case: %w", err) + err = fmt.Errorf("root: failed to pre-initialize scenario: %w", err) return } @@ -463,14 +476,14 @@ func doScenario(childEnv *env.Env, sc scenario.Scenario) (err error) { } } - // If network is used, enable shorter per-node socket paths, because some e2e test datadir - // exceed maximum unix socket path length. + // If network is used, enable shorter per-node socket paths, because some + // datadir for some scenarios exceed the maximum unix socket path length. 
if net != nil { net.Config().UseShortGrpcSocketPaths = true } if err = sc.Init(childEnv, net); err != nil { - err = fmt.Errorf("root: failed to initialize test case: %w", err) + err = fmt.Errorf("root: failed to initialize scenario: %w", err) return } @@ -483,7 +496,7 @@ func doScenario(childEnv *env.Env, sc scenario.Scenario) (err error) { } if err = sc.Run(childEnv); err != nil { - err = fmt.Errorf("root: failed to run test case: %w", err) + err = fmt.Errorf("root: failed to run scenario: %w", err) return } @@ -501,7 +514,7 @@ func doScenario(childEnv *env.Env, sc scenario.Scenario) (err error) { func doCleanup(childEnv *env.Env) (err error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("root: panic caught cleaning up test case: %v, %s", r, debug.Stack()) + err = fmt.Errorf("root: panic caught cleaning up scenario: %v, %s", r, debug.Stack()) } }() @@ -511,23 +524,17 @@ func doCleanup(childEnv *env.Env) (err error) { } func runList(cmd *cobra.Command, args []string) { - switch len(common.GetScenarios()) { + scNames := common.GetScenarioNames() + switch len(scNames) { case 0: - fmt.Printf("No supported test cases!\n") + fmt.Printf("No scenarios are available.\n") default: - fmt.Printf("Supported test cases:\n") - - // Sort scenarios alphabetically before printing. - var scenarioNames []string - for name := range common.GetScenarios() { - scenarioNames = append(scenarioNames, name) - } - sort.Strings(scenarioNames) + fmt.Printf("Available scenarios:\n") - for _, n := range scenarioNames { - fmt.Printf(" * %v", n) + for _, name := range scNames { + fmt.Printf(" * %v", name) var intro bool - common.GetScenarios()[n].Parameters().VisitAll(func(f *flag.Flag) { + common.GetScenarios()[name].Parameters().VisitAll(func(f *flag.Flag) { if !intro { fmt.Printf(" (parameters:") intro = true @@ -550,18 +557,31 @@ func init() { persistentFlags := flag.NewFlagSet("", flag.ContinueOnError) persistentFlags.Var(&logFmt, common.CfgLogFmt, "log format") persistentFlags.Var(&logLevel, common.CfgLogLevel, "log level") - persistentFlags.StringSliceP(common.CfgTest, common.CfgTestP, nil, "regexp patterns matching names of tests") + persistentFlags.StringSliceP( + common.CfgScenarioRegex, + common.CfgScenarioRegexShort, + nil, + "regexp patterns matching names of scenarios", + ) persistentFlags.String(metrics.CfgMetricsAddr, "", "Prometheus address") - persistentFlags.StringToString(metrics.CfgMetricsLabels, map[string]string{}, "override Prometheus labels") + persistentFlags.StringToString( + metrics.CfgMetricsLabels, + map[string]string{}, + "override Prometheus labels", + ) _ = viper.BindPFlags(persistentFlags) rootCmd.PersistentFlags().AddFlagSet(persistentFlags) // Register flags. 
rootFlags := flag.NewFlagSet("", flag.ContinueOnError) rootFlags.StringVar(&cfgFile, cfgConfigFile, "", "config file") - rootFlags.Bool(cfgLogNoStdout, false, "do not mutiplex logs to stdout") - rootFlags.Duration(metrics.CfgMetricsInterval, 5*time.Second, "metrics push interval for test runner and oasis nodes") - rootFlags.IntVarP(&numRuns, cfgNumRuns, "n", 1, "number of runs for given test(s)") + rootFlags.Bool(cfgLogNoStdout, false, "do not multiplex logs to stdout") + rootFlags.Duration( + metrics.CfgMetricsInterval, + 5*time.Second, + "metrics push interval for test runner and oasis nodes", + ) + rootFlags.IntVarP(&numRuns, cfgNumRuns, "n", 1, "number of runs for given scenario(s)") rootFlags.Int(cfgParallelJobCount, 1, "(for CI) number of overall parallel jobs") rootFlags.Int(cfgParallelJobIndex, 0, "(for CI) index of this parallel job") _ = viper.BindPFlags(rootFlags) diff --git a/go/oasis-test-runner/cmd/root_test.go b/go/oasis-test-runner/cmd/root_test.go index e009d4f5783..461b3c3782a 100644 --- a/go/oasis-test-runner/cmd/root_test.go +++ b/go/oasis-test-runner/cmd/root_test.go @@ -68,15 +68,15 @@ func TestComputeParamSets(t *testing.T) { require.Equal(t, expectedParamSets, computeParamSets(zippedParams, map[string]string{})) } -func TestRecurseScenarioName(t *testing.T) { +func TestGeneralizedScenarioName(t *testing.T) { var expectedNames []string expectedNames = []string{"e2e/runtime/abc/def", "e2e/runtime/abc", "e2e/runtime", "e2e"} - require.Equal(t, expectedNames, recurseScenarioName("e2e/runtime/abc/def")) + require.Equal(t, expectedNames, generalizedScenarioName("e2e/runtime/abc/def")) expectedNames = []string{"e2e"} - require.Equal(t, expectedNames, recurseScenarioName("e2e")) + require.Equal(t, expectedNames, generalizedScenarioName("e2e")) expectedNames = []string{""} - require.Equal(t, expectedNames, recurseScenarioName("")) + require.Equal(t, expectedNames, generalizedScenarioName("")) } diff --git a/go/oasis-test-runner/env/env.go b/go/oasis-test-runner/env/env.go index 4cf1bbafeb3..62b1ea2de99 100644 --- a/go/oasis-test-runner/env/env.go +++ b/go/oasis-test-runner/env/env.go @@ -1,4 +1,4 @@ -// Package env defines a test environment. +// Package env defines a scenario environment. package env import ( @@ -17,7 +17,7 @@ import ( ) // ErrEarlyTerm is the error passed over the error channel when a -// sub-process termiantes prior to the Cleanup. +// sub-process terminates prior to the Cleanup. var ErrEarlyTerm = errors.New("env: sub-process exited early") // CmdAttrs is the SysProcAttr that will ensure graceful cleanup. @@ -36,18 +36,18 @@ type ParameterFlagSet struct { errorHandling flag.ErrorHandling } -// TestInstanceInfo contains information of the current test run. -type TestInstanceInfo struct { - // Test is the name of the test. - Test string `json:"test"` +// ScenarioInstanceInfo contains information of the current scenario run. +type ScenarioInstanceInfo struct { + // Scenario is the name of the scenario. + Scenario string `json:"scenario"` - // Instance is the instance name of the test. e.g. oasis-test-runner123456 + // Instance is the name of the scenario instance (e.g. oasis-test-runner123456). Instance string `json:"instance"` - // ParameterSet is the parameter set the test was run with. + // ParameterSet is the parameter set the scenario was run with. ParameterSet *ParameterFlagSet `json:"parameter_set"` - // Run is the number of run. + // Run is the number of the run. 
 	Run int `json:"run"`
 }
 
@@ -97,11 +97,11 @@ type Env struct {
 	parentElem *list.Element
 	children   *list.List
 
-	dir         *Dir
-	testInfo    *TestInstanceInfo
-	cleanupFns  []CleanupFn
-	cleanupCmds []*cmdMonitor
-	cleanupLock sync.Mutex
+	dir          *Dir
+	scenarioInfo *ScenarioInstanceInfo
+	cleanupFns   []CleanupFn
+	cleanupCmds  []*cmdMonitor
+	cleanupLock  sync.Mutex
 
 	isInCleanup bool
 }
@@ -126,9 +126,9 @@ func (env *Env) NewSubDir(subDirName string) (*Dir, error) {
 	return env.dir.NewSubDir(subDirName)
 }
 
-// TestInfo returns the test instance information.
-func (env *Env) TestInfo() *TestInstanceInfo {
-	return env.testInfo
+// ScenarioInfo returns the scenario instance information.
+func (env *Env) ScenarioInfo() *ScenarioInstanceInfo {
+	return env.scenarioInfo
 }
 
 // AddOnCleanup adds a cleanup routine to be called durring the environment's
@@ -208,7 +208,7 @@ func (env *Env) Cleanup() {
 }
 
 // NewChild returns a new child test environment.
-func (env *Env) NewChild(childName string, testInfo *TestInstanceInfo) (*Env, error) {
+func (env *Env) NewChild(childName string, scInfo *ScenarioInstanceInfo) (*Env, error) {
 	var parentDir *Dir
 	if env.parent != nil {
 		parentDir = env.parent.dir
@@ -222,24 +222,25 @@ func (env *Env) NewChild(childName string, testInfo *TestInstanceInfo) (*Env, er
 	}
 
 	child := &Env{
-		name:     childName,
-		parent:   env,
-		children: list.New(),
-		dir:      subDir,
-		testInfo: testInfo,
+		name:         childName,
+		parent:       env,
+		children:     list.New(),
+		dir:          subDir,
+		scenarioInfo: scInfo,
 	}
 	child.parentElem = env.children.PushBack(child)
 
 	return child, nil
 }
 
-// WriteTestInfo dumps test instance parameter set to test_info.json file for debugging afterwards.
-func (env *Env) WriteTestInfo() error {
-	b, err := json.Marshal(env.testInfo)
+// WriteScenarioInfo dumps scenario instance parameter set to scenario_info.json
+// file for debugging afterwards.
+func (env *Env) WriteScenarioInfo() error {
+	b, err := json.Marshal(env.scenarioInfo)
 	if err != nil {
 		return err
 	}
-	if err = ioutil.WriteFile(filepath.Join(env.Dir(), "test_info.json"), b, 0o644); err != nil { // nolint: gosec
+	if err = ioutil.WriteFile(filepath.Join(env.Dir(), "scenario_info.json"), b, 0o644); err != nil { // nolint: gosec
 		return err
 	}
 
diff --git a/go/oasis-test-runner/oasis/args.go b/go/oasis-test-runner/oasis/args.go
index d57d901bbf1..7d7eb87ef43 100644
--- a/go/oasis-test-runner/oasis/args.go
+++ b/go/oasis-test-runner/oasis/args.go
@@ -459,7 +459,7 @@ func (args *argBuilder) appendNodeMetrics(node *Node) *argBuilder {
 
 	// Append labels.
 	args.vec = append(args.vec, "--"+metrics.CfgMetricsLabels)
-	ti := node.net.env.TestInfo()
+	ti := node.net.env.ScenarioInfo()
 	labels := metrics.GetDefaultPushLabels(ti)
 	var l []string
 	for k, v := range labels {
diff --git a/go/oasis-test-runner/scenario/e2e/e2e.go b/go/oasis-test-runner/scenario/e2e/e2e.go
index 3dfae8e7795..21063e79432 100644
--- a/go/oasis-test-runner/scenario/e2e/e2e.go
+++ b/go/oasis-test-runner/scenario/e2e/e2e.go
@@ -300,7 +300,7 @@ func (sc *E2E) finishWithoutChild() error {
 // RegisterScenarios registers all end-to-end scenarios.
 func RegisterScenarios() error {
 	// Register non-scenario-specific parameters.
-	cmd.RegisterTestParams(E2eParamsDummy.Name(), E2eParamsDummy.Parameters())
+	cmd.RegisterScenarioParams(E2eParamsDummy.Name(), E2eParamsDummy.Parameters())
 
 	// Register default scenarios which are executed, if no test names provided.
 	for _, s := range []scenario.Scenario{
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/runtime.go b/go/oasis-test-runner/scenario/e2e/runtime/runtime.go
index 178987d04d8..db2f5a05cd9 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/runtime.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/runtime.go
@@ -486,7 +486,7 @@ func (sc *runtimeImpl) initialEpochTransitions() error {
 // RegisterScenarios registers all end-to-end scenarios.
 func RegisterScenarios() error {
 	// Register non-scenario-specific parameters.
-	cmd.RegisterTestParams(RuntimeParamsDummy.Name(), RuntimeParamsDummy.Parameters())
+	cmd.RegisterScenarioParams(RuntimeParamsDummy.Name(), RuntimeParamsDummy.Parameters())
 
 	// Register default scenarios which are executed, if no test names provided.
 	for _, s := range []scenario.Scenario{
diff --git a/go/oasis-test-runner/scenario/remotesigner/remotesigner.go b/go/oasis-test-runner/scenario/remotesigner/remotesigner.go
index b777d97e444..68d6cf7ccd0 100644
--- a/go/oasis-test-runner/scenario/remotesigner/remotesigner.go
+++ b/go/oasis-test-runner/scenario/remotesigner/remotesigner.go
@@ -74,7 +74,7 @@ func (sc *remoteSignerImpl) Init(childEnv *env.Env, net *oasis.Network) error {
 // RegisterScenarios registers all scenarios for remote-signer.
 func RegisterScenarios() error {
 	// Register non-scenario-specific parameters.
-	cmd.RegisterTestParams(RemoteSignerParamsDummy.Name(), RemoteSignerParamsDummy.Parameters())
+	cmd.RegisterScenarioParams(RemoteSignerParamsDummy.Name(), RemoteSignerParamsDummy.Parameters())
 
 	// Register default scenarios which are executed, if no test names provided.
 	for _, s := range []scenario.Scenario{