diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml
index 10efb527cc79f..532773dcd8aaa 100644
--- a/.github/workflows/docker-build-test.yaml
+++ b/.github/workflows/docker-build-test.yaml
@@ -277,6 +277,7 @@ jobs:
# by this GHA. If there is a Forge namespace collision, Forge will pre-empt the existing test running in the namespace.
FORGE_NAMESPACE: forge-e2e-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }}
SKIP_JOB: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' }}
+ SEND_RESULTS_TO_TRUNK: true
# This job determines the last released docker image tag, which is used by forge compat test.
fetch-last-released-docker-image-tag:
@@ -356,6 +357,7 @@ jobs:
COMMENT_HEADER: forge-compat
FORGE_NAMESPACE: forge-compat-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }}
SKIP_JOB: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' }}
+ SEND_RESULTS_TO_TRUNK: true
# Run forge framework upgradability test. This is a PR required job.
forge-framework-upgrade-test:
@@ -385,6 +387,7 @@ jobs:
COMMENT_HEADER: forge-framework-upgrade
FORGE_NAMESPACE: forge-framework-upgrade-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }}
SKIP_JOB: ${{ !contains(github.event.pull_request.labels.*.name, 'CICD:run-framework-upgrade-test') && (needs.test-target-determinator.outputs.run_framework_upgrade_test == 'false') }}
+ SEND_RESULTS_TO_TRUNK: true
forge-consensus-only-perf-test:
needs:
diff --git a/.github/workflows/forge-stable.yaml b/.github/workflows/forge-stable.yaml
index 73947c9936a33..88e80dbd4ab31 100644
--- a/.github/workflows/forge-stable.yaml
+++ b/.github/workflows/forge-stable.yaml
@@ -136,6 +136,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 7200 # Run for 2 hours
FORGE_TEST_SUITE: framework_upgrade
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-realistic-env-load-sweep:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -148,6 +149,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 1800 # Run for 30 minutes (6 tests, each for 300 seconds)
FORGE_TEST_SUITE: realistic_env_load_sweep
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-realistic-env-workload-sweep:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -160,6 +162,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 2000 # Run for 33 minutes (5 tests, each for 400 seconds)
FORGE_TEST_SUITE: realistic_env_workload_sweep
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-realistic-env-graceful-overload:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -172,6 +175,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 1200 # Run for 20 minutes
FORGE_TEST_SUITE: realistic_env_graceful_overload
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-realistic-env-graceful-workload-sweep:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -184,6 +188,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 2100 # Run for 5 minutes per test, 7 tests.
FORGE_TEST_SUITE: realistic_env_graceful_workload_sweep
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-realistic-env-fairness-workload-sweep:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -196,6 +201,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 900 # Run for 5 minutes per test, 3 tests.
FORGE_TEST_SUITE: realistic_env_fairness_workload_sweep
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-realistic-network-tuned-for-throughput:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -209,6 +215,7 @@ jobs:
FORGE_TEST_SUITE: realistic_network_tuned_for_throughput
FORGE_ENABLE_PERFORMANCE: true
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
### Forge Correctness/Component/Stress tests
@@ -223,6 +230,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 2400 # Run for 40 minutes
FORGE_TEST_SUITE: consensus_stress_test
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-workload-mix-test:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -235,6 +243,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 900 # Run for 15 minutes
FORGE_TEST_SUITE: workload_mix
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-single-vfn-perf:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -247,6 +256,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 480 # Run for 8 minutes
FORGE_TEST_SUITE: single_vfn_perf
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
run-forge-fullnode-reboot-stress-test:
if: ${{ github.event_name != 'pull_request' && always() }}
@@ -259,6 +269,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 1800 # Run for 30 minutes
FORGE_TEST_SUITE: fullnode_reboot_stress_test
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
### Compatibility Forge tests
@@ -275,6 +286,7 @@ jobs:
IMAGE_TAG: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG_FOR_COMPAT_TEST }}
GIT_SHA: ${{ needs.determine-test-metadata.outputs.IMAGE_TAG }} # this is the git ref to checkout
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
### Changing working quorum Forge tests
@@ -289,6 +301,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 1200 # Run for 20 minutes
FORGE_TEST_SUITE: changing_working_quorum_test
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
FORGE_ENABLE_FAILPOINTS: true
run-forge-changing-working-quorum-test-high-load:
@@ -302,6 +315,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 900 # Run for 15 minutes
FORGE_TEST_SUITE: changing_working_quorum_test_high_load
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
FORGE_ENABLE_FAILPOINTS: true
# Measures PFN latencies with a constant TPS (with a realistic environment)
@@ -316,6 +330,7 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 900 # Run for 15 minutes
FORGE_TEST_SUITE: pfn_const_tps_with_realistic_env
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
# longest test for last, to get useful signal from short tests first
@@ -331,3 +346,4 @@ jobs:
FORGE_RUNNER_DURATION_SECS: 7200 # Run for 2 hours
FORGE_TEST_SUITE: realistic_env_max_load_large
POST_TO_SLACK: true
+ SEND_RESULTS_TO_TRUNK: true
diff --git a/.github/workflows/workflow-run-forge.yaml b/.github/workflows/workflow-run-forge.yaml
index 05d0251194f8c..bf085906a15e0 100644
--- a/.github/workflows/workflow-run-forge.yaml
+++ b/.github/workflows/workflow-run-forge.yaml
@@ -87,6 +87,10 @@ on:
required: false
type: string
description: The deployer profile used to spin up and configure forge infrastructure
+ SEND_RESULTS_TO_TRUNK:
+ required: false
+ type: boolean
+ description: Send forge results to trunk.io
env:
AWS_ACCOUNT_NUM: ${{ secrets.ENV_ECR_AWS_ACCOUNT_NUM }}
@@ -118,6 +122,7 @@ env:
VERBOSE: true
FORGE_NUM_VALIDATORS: ${{ inputs.FORGE_NUM_VALIDATORS }}
FORGE_NUM_VALIDATOR_FULLNODES: ${{ inputs.FORGE_NUM_VALIDATOR_FULLNODES }}
+ FORGE_JUNIT_XML_PATH: ${{ inputs.SEND_RESULTS_TO_TRUNK && '/tmp/test.xml' || '' }}
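+ # GHA ternary: write junit xml to /tmp/test.xml only when SEND_RESULTS_TO_TRUNK is enabled; otherwise the empty path leaves junit output off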
# TODO: should we migrate this to a composite action, so that we can skip it
# at the call site, and don't need to wrap each step in an if statement?
@@ -228,3 +233,14 @@ jobs:
# Print out whether the job was skipped.
- run: echo "Skipping forge test!"
if: ${{ inputs.SKIP_JOB }}
+
+ - name: Upload results
+ # Run this step even if the preceding test step fails
+ if: ${{ !inputs.SKIP_JOB && inputs.SEND_RESULTS_TO_TRUNK && !cancelled() }}
+ uses: trunk-io/analytics-uploader@main
+ with:
+ # The junit xml path comes from FORGE_JUNIT_XML_PATH, set in the env section above
+ junit-paths: ${{ env.FORGE_JUNIT_XML_PATH }}
+ org-slug: aptoslabs
+ token: ${{ secrets.TRUNK_API_TOKEN }}
+ continue-on-error: true
diff --git a/Cargo.lock b/Cargo.lock
index 91043badffca3..3281279bd27ff 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1767,6 +1767,7 @@ dependencies = [
"num_cpus",
"once_cell",
"prometheus-http-query",
+ "quick-junit",
"rand 0.7.3",
"regex",
"reqwest 0.11.23",
@@ -1774,11 +1775,13 @@ dependencies = [
"serde_json",
"serde_merge",
"serde_yaml 0.8.26",
+ "sugars",
"tempfile",
"termcolor",
"thiserror",
"tokio",
"url",
+ "uuid",
]
[[package]]
@@ -1805,6 +1808,7 @@ dependencies = [
"reqwest 0.11.23",
"serde_json",
"serde_yaml 0.8.26",
+ "sugars",
"tokio",
"url",
]
@@ -8873,7 +8877,7 @@ dependencies = [
"fixedbitset 0.4.2",
"guppy-summaries",
"guppy-workspace-hack",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"itertools 0.12.1",
"nested",
"once_cell",
@@ -8922,7 +8926,7 @@ dependencies = [
"futures-sink",
"futures-util",
"http 0.2.11",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"slab",
"tokio",
"tokio-util 0.7.10",
@@ -8941,7 +8945,7 @@ dependencies = [
"futures-core",
"futures-sink",
"http 1.1.0",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"slab",
"tokio",
"tokio-util 0.7.10",
@@ -9021,6 +9025,12 @@ dependencies = [
"allocator-api2",
]
+[[package]]
+name = "hashbrown"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
+
[[package]]
name = "hdrhistogram"
version = "7.5.4"
@@ -9714,12 +9724,12 @@ dependencies = [
[[package]]
name = "indexmap"
-version = "2.2.5"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"
+checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
dependencies = [
"equivalent",
- "hashbrown 0.14.3",
+ "hashbrown 0.15.0",
"serde",
]
@@ -9766,7 +9776,7 @@ dependencies = [
"crossbeam-utils",
"dashmap",
"env_logger",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"is-terminal",
"itoa",
"log",
@@ -11898,6 +11908,15 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
+[[package]]
+name = "newtype-uuid"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f4933943834e236c864a48aefdc2da43885dbd5eb77bff3ab20f31e0c3146f5"
+dependencies = [
+ "uuid",
+]
+
[[package]]
name = "nix"
version = "0.26.4"
@@ -12600,7 +12619,7 @@ dependencies = [
"ciborium",
"coset",
"data-encoding",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"rand 0.8.5",
"serde",
"serde_json",
@@ -12786,7 +12805,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [
"fixedbitset 0.4.2",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
]
[[package]]
@@ -13058,7 +13077,7 @@ dependencies = [
"bytes",
"derive_more",
"futures-util",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"mime",
"num-traits",
"poem",
@@ -13081,7 +13100,7 @@ source = "git+https://github.com/poem-web/poem.git?rev=809b2816d3504beeba140fef3
dependencies = [
"darling 0.20.9",
"http 1.1.0",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"mime",
"proc-macro-crate 3.1.0",
"proc-macro2",
@@ -13830,6 +13849,21 @@ version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
+[[package]]
+name = "quick-junit"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62ffd2f9a162cfae131bed6d9d1ed60adced33be340a94f96952897d7cb0c240"
+dependencies = [
+ "chrono",
+ "indexmap 2.6.0",
+ "newtype-uuid",
+ "quick-xml 0.36.2",
+ "strip-ansi-escapes",
+ "thiserror",
+ "uuid",
+]
+
[[package]]
name = "quick-xml"
version = "0.23.1"
@@ -13858,6 +13892,15 @@ dependencies = [
"serde",
]
+[[package]]
+name = "quick-xml"
+version = "0.36.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7649a7b4df05aed9ea7ec6f628c67c9953a43869b8bc50929569b2999d443fe"
+dependencies = [
+ "memchr",
+]
+
[[package]]
name = "quick_cache"
version = "0.5.1"
@@ -15117,7 +15160,7 @@ version = "1.0.114"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0"
dependencies = [
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"itoa",
"ryu",
"serde",
@@ -15196,7 +15239,7 @@ dependencies = [
"chrono",
"hex",
"indexmap 1.9.3",
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"serde",
"serde_json",
"serde_with_macros",
@@ -15233,7 +15276,7 @@ version = "0.9.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38"
dependencies = [
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"itoa",
"ryu",
"serde",
@@ -15817,6 +15860,15 @@ dependencies = [
"unicode-normalization",
]
+[[package]]
+name = "strip-ansi-escapes"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55ff8ef943b384c414f54aefa961dd2bd853add74ec75e7ac74cf91dba62bcfa"
+dependencies = [
+ "vte",
+]
+
[[package]]
name = "strsim"
version = "0.8.0"
@@ -15943,6 +15995,12 @@ version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142"
+[[package]]
+name = "sugars"
+version = "3.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc0db74f9ee706e039d031a560bd7d110c7022f016051b3d33eeff9583e3e67a"
+
[[package]]
name = "symbolic-common"
version = "10.2.1"
@@ -16267,18 +16325,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thiserror"
-version = "1.0.61"
+version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
+checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.61"
+version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
+checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
dependencies = [
"proc-macro2",
"quote",
@@ -16652,7 +16710,7 @@ version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"serde",
"serde_spanned",
"toml_datetime",
@@ -16665,7 +16723,7 @@ version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338"
dependencies = [
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"toml_datetime",
"winnow",
]
@@ -16676,7 +16734,7 @@ version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1"
dependencies = [
- "indexmap 2.2.5",
+ "indexmap 2.6.0",
"toml_datetime",
"winnow",
]
@@ -17361,9 +17419,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "uuid"
-version = "1.9.1"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439"
+checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"
dependencies = [
"getrandom 0.2.11",
"serde",
@@ -17415,6 +17473,26 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+[[package]]
+name = "vte"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f5022b5fbf9407086c180e9557be968742d839e68346af7792b8592489732197"
+dependencies = [
+ "utf8parse",
+ "vte_generate_state_changes",
+]
+
+[[package]]
+name = "vte_generate_state_changes"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e369bee1b05d510a7b4ed645f5faa90619e05437111783ea5848f28d97d3c2e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
[[package]]
name = "wait-timeout"
version = "0.2.0"
diff --git a/Cargo.toml b/Cargo.toml
index 2156a478de562..bac2fee860de1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -521,7 +521,12 @@ cfg_block = "0.1.1"
cfg-if = "1.0.0"
ciborium = "0.2"
claims = "0.7"
-clap = { version = "4.3.9", features = ["derive", "env", "unstable-styles", "wrap_help"] }
+clap = { version = "4.3.9", features = [
+ "derive",
+ "env",
+ "unstable-styles",
+ "wrap_help",
+] }
clap-verbosity-flag = "2.1.1"
clap_complete = "4.4.1"
cloud-storage = { version = "0.11.1", features = [
@@ -677,8 +682,14 @@ petgraph = "0.6.5"
pin-project = "1.0.10"
plotters = { version = "0.3.5", default-features = false }
# We're using git deps until https://github.com/poem-web/poem/pull/829 gets formally released.
-poem = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b", features = ["anyhow", "rustls"] }
-poem-openapi = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b", features = ["swagger-ui", "url"] }
+poem = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b", features = [
+ "anyhow",
+ "rustls",
+] }
+poem-openapi = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b", features = [
+ "swagger-ui",
+ "url",
+] }
poem-openapi-derive = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b" }
poseidon-ark = { git = "https://github.com/arnaucube/poseidon-ark.git", rev = "6d2487aa1308d9d3860a2b724c485d73095c1c68" }
pprof = { version = "0.11", features = ["flamegraph", "protobuf-codec"] }
@@ -696,6 +707,7 @@ prost = { version = "0.12.3", features = ["no-recursion-limit"] }
prost-types = "0.12.3"
quanta = "0.10.1"
quick_cache = "0.5.1"
+quick-junit = "0.5.0"
quote = "1.0.18"
rand = "0.7.3"
rand_core = "0.5.1"
@@ -758,6 +770,7 @@ stats_alloc = "0.1.8"
status-line = "0.2.0"
strum = "0.24.1"
strum_macros = "0.24.2"
+sugars = "3.0.1"
syn = { version = "1.0.92", features = ["derive", "extra-traits"] }
sysinfo = "0.28.4"
tar = "0.4.40"
diff --git a/testsuite/fixtures/testFormatJunitXml.fixture b/testsuite/fixtures/testFormatJunitXml.fixture
new file mode 100644
index 0000000000000..8548bf70a0ca7
--- /dev/null
+++ b/testsuite/fixtures/testFormatJunitXml.fixture
@@ -0,0 +1,3 @@
+
+blah
+
\ No newline at end of file
diff --git a/testsuite/fixtures/testMain.fixture b/testsuite/fixtures/testMain.fixture
index c0107a095369b..aeb54372076e1 100644
--- a/testsuite/fixtures/testMain.fixture
+++ b/testsuite/fixtures/testMain.fixture
@@ -8,6 +8,7 @@ Using the following image tags:
Checking if image exists in GCP: aptos/validator-testing:banana
Checking if image exists in GCP: aptos/validator-testing:banana
Checking if image exists in GCP: aptos/forge:banana
+forge_args: ['forge', '--suite', 'banana-test', '--duration-secs', '300', 'test', 'k8s-swarm', '--image-tag', 'banana', '--upgrade-image-tag', 'banana', '--namespace', 'forge-perry-1659078000']
=== Start temp-pre-comment ===
### Forge is running suite `banana-test` on `banana`
* [Grafana dashboard (auto-refresh)](https://aptoslabs.grafana.net/d/overview/overview?orgId=1&refresh=10s&var-Datasource=VictoriaMetrics%20Global%20%28Non-mainnet%29&var-BigQuery=Google%20BigQuery&var-namespace=forge-perry-1659078000&var-metrics_source=All&var-chain_name=forge-big-1&refresh=10s&from=now-15m&to=now)
@@ -19,6 +20,7 @@ Checking if image exists in GCP: aptos/forge:banana
* Test run is land-blocking
=== End temp-pre-comment ===
Deleting forge pod for namespace forge-perry-1659078000
+rendered_forge_test_runner:
Deleting forge pod for namespace forge-perry-1659078000
=== Start temp-report ===
Forge test runner terminated:
diff --git a/testsuite/forge-cli/Cargo.toml b/testsuite/forge-cli/Cargo.toml
index 4006dbc32c6f7..e6b3000ee9b30 100644
--- a/testsuite/forge-cli/Cargo.toml
+++ b/testsuite/forge-cli/Cargo.toml
@@ -32,6 +32,7 @@ random_word = { workspace = true }
reqwest = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
+sugars = { workspace = true }
tokio = { workspace = true }
url = { workspace = true }
diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs
index 269247405b162..b90cf2fa40372 100644
--- a/testsuite/forge-cli/src/main.rs
+++ b/testsuite/forge-cli/src/main.rs
@@ -5,13 +5,14 @@
#![allow(clippy::field_reassign_with_default)]
use anyhow::{bail, format_err, Context, Result};
-use aptos_forge::{ForgeConfig, Options, *};
+use aptos_forge::{config::ForgeConfig, Options, *};
use aptos_logger::Level;
use clap::{Parser, Subcommand};
use futures::{future, FutureExt};
use rand::{rngs::ThreadRng, seq::SliceRandom, Rng};
use serde_json::{json, Value};
use std::{self, env, num::NonZeroUsize, process, time::Duration};
+use sugars::{boxed, hmap};
use suites::{
dag::get_dag_test,
indexer::get_indexer_test,
@@ -277,13 +278,13 @@ fn main() -> Result<()> {
mempool_backlog: 5000,
}));
let swarm_dir = local_cfg.swarmdir.clone();
- run_forge(
- duration,
+ let forge = Forge::new(
+ &args.options,
test_suite,
+ duration,
LocalFactory::from_workspace(swarm_dir)?,
- &args.options,
- args.changelog.clone(),
- )
+ );
+ run_forge_with_changelog(forge, &args.options, args.changelog.clone())
},
TestCommand::K8sSwarm(k8s) => {
if let Some(move_modules_dir) = &k8s.move_modules_dir {
@@ -308,9 +309,10 @@ fn main() -> Result<()> {
};
let forge_runner_mode =
ForgeRunnerMode::try_from_env().unwrap_or(ForgeRunnerMode::K8s);
- run_forge(
- duration,
+ let forge = Forge::new(
+ &args.options,
test_suite,
+ duration,
K8sFactory::new(
namespace,
k8s.image_tag.clone(),
@@ -322,12 +324,9 @@ fn main() -> Result<()> {
k8s.enable_haproxy,
k8s.enable_indexer,
k8s.deployer_profile.clone(),
- )
- .unwrap(),
- &args.options,
- args.changelog,
- )?;
- Ok(())
+ )?,
+ );
+ run_forge_with_changelog(forge, &args.options, args.changelog)
},
}
},
@@ -413,39 +412,33 @@ fn main() -> Result<()> {
}
}
-pub fn run_forge<F: Factory>(
- global_duration: Duration,
- tests: ForgeConfig,
- factory: F,
+pub fn run_forge_with_changelog<F: Factory>(
+ forge: Forge<F>,
options: &Options,
- logs: Option<Vec<String>>,
+ optional_changelog: Option<Vec<String>>,
) -> Result<()> {
- let forge = Forge::new(options, tests, global_duration, factory);
-
if options.list {
forge.list()?;
return Ok(());
}
- match forge.run() {
- Ok(report) => {
- if let Some(mut changelog) = logs {
- if changelog.len() != 2 {
- println!("Use: changelog ");
- process::exit(1);
- }
- let to_commit = changelog.remove(1);
- let from_commit = Some(changelog.remove(0));
- send_changelog_message(&report.to_string(), &from_commit, &to_commit);
- }
- Ok(())
- },
- Err(e) => {
- eprintln!("Failed to run tests:\n{}", e);
- Err(e)
- },
+ let forge_result = forge.run();
+ let report = forge_result.map_err(|e| {
+ eprintln!("Failed to run tests:\n{}", e);
+ anyhow::anyhow!(e)
+ })?;
+
+ if let Some(changelog) = optional_changelog {
+ if changelog.len() != 2 {
+ println!("Use: changelog ");
+ process::exit(1);
+ }
+ let to_commit = changelog[1].clone();
+ let from_commit = Some(changelog[0].clone());
+ send_changelog_message(&report.to_string(), &from_commit, &to_commit);
}
+ Ok(())
}
pub fn send_changelog_message(perf_msg: &str, from_commit: &Option<String>, to_commit: &str) {
@@ -503,39 +496,42 @@ fn get_test_suite(
duration: Duration,
test_cmd: &TestCommand,
) -> Result<ForgeConfig> {
- // Check the test name against the multi-test suites
- match test_name {
- "local_test_suite" => return Ok(local_test_suite()),
- "pre_release" => return Ok(pre_release_suite()),
- "run_forever" => return Ok(run_forever()),
- // TODO(rustielin): verify each test suite
- "k8s_suite" => return Ok(k8s_test_suite()),
- "chaos" => return Ok(chaos_test_suite(duration)),
- _ => {}, // No multi-test suite matches!
+ // These are high level suite aliases that express an intent
+ let suite_aliases = hmap! {
+ "local_test_suite" => boxed!(local_test_suite) as Box ForgeConfig>,
+ "pre_release" => boxed!(pre_release_suite),
+ "run_forever" => boxed!(run_forever),
+ "k8s_suite" => boxed!(k8s_test_suite),
+ "chaos" => boxed!(|| chaos_test_suite(duration)),
};
+ if let Some(test_suite) = suite_aliases.get(test_name) {
+ return Ok(test_suite());
+ }
+
// Otherwise, check the test name against the grouped test suites
- if let Some(test_suite) = get_land_blocking_test(test_name, duration, test_cmd) {
- Ok(test_suite)
- } else if let Some(test_suite) = get_multi_region_test(test_name) {
- return Ok(test_suite);
- } else if let Some(test_suite) = get_netbench_test(test_name) {
- return Ok(test_suite);
- } else if let Some(test_suite) = get_pfn_test(test_name, duration) {
- return Ok(test_suite);
- } else if let Some(test_suite) = get_realistic_env_test(test_name, duration, test_cmd) {
- return Ok(test_suite);
- } else if let Some(test_suite) = get_state_sync_test(test_name) {
- return Ok(test_suite);
- } else if let Some(test_suite) = get_dag_test(test_name, duration, test_cmd) {
- return Ok(test_suite);
- } else if let Some(test_suite) = get_indexer_test(test_name) {
- return Ok(test_suite);
- } else if let Some(test_suite) = get_ungrouped_test(test_name) {
- return Ok(test_suite);
- } else {
- bail!(format_err!("Invalid --suite given: {:?}", test_name))
+ // This is done in order of priority
+ // A match higher up in the list will take precedence
+ let named_test_suites = [
+ boxed!(|| get_land_blocking_test(test_name, duration, test_cmd))
+ as Box<dyn Fn() -> Option<ForgeConfig>>,
+ boxed!(|| get_multi_region_test(test_name)),
+ boxed!(|| get_netbench_test(test_name)),
+ boxed!(|| get_pfn_test(test_name, duration)),
+ boxed!(|| get_realistic_env_test(test_name, duration, test_cmd)),
+ boxed!(|| get_state_sync_test(test_name)),
+ boxed!(|| get_dag_test(test_name, duration, test_cmd)),
+ boxed!(|| get_indexer_test(test_name)),
+ boxed!(|| get_ungrouped_test(test_name)),
+ ];
+
+ for named_suite in named_test_suites.iter() {
+ if let Some(suite) = named_suite() {
+ return Ok(suite);
+ }
}
+
+ bail!(format_err!("Invalid --suite given: {:?}", test_name))
}
#[cfg(test)]
mod test {
diff --git a/testsuite/forge-test-runner-template.yaml b/testsuite/forge-test-runner-template.yaml
index 60876c498df8c..1d856ca269c04 100644
--- a/testsuite/forge-test-runner-template.yaml
+++ b/testsuite/forge-test-runner-template.yaml
@@ -38,6 +38,8 @@ spec:
value: {FORGE_USERNAME}
- name: FORGE_RETAIN_DEBUG_LOGS
value: "{FORGE_RETAIN_DEBUG_LOGS}"
+ - name: FORGE_JUNIT_XML_PATH
+ value: "{FORGE_JUNIT_XML_PATH}"
- name: PROMETHEUS_URL
valueFrom:
secretKeyRef:
diff --git a/testsuite/forge.py b/testsuite/forge.py
index 772597c976009..242d7e1233d4c 100644
--- a/testsuite/forge.py
+++ b/testsuite/forge.py
@@ -266,6 +266,7 @@ class ForgeContext:
forge_username: str
forge_blocking: bool
forge_retain_debug_logs: str
+ forge_junit_xml_path: Optional[str]
github_actions: str
github_job_url: Optional[str]
@@ -688,6 +689,33 @@ def format_comment(context: ForgeContext, result: ForgeResult) -> str:
)
+BEGIN_JUNIT = "=== BEGIN JUNIT ==="
+END_JUNIT = "=== END JUNIT ==="
+
+
+def format_junit_xml(_context: ForgeContext, result: ForgeResult) -> str:
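+ """Extract the junit xml that forge prints between the BEGIN/END JUNIT markers."""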
+ forge_output = result.output
+ start_index = forge_output.find(BEGIN_JUNIT)
+ if start_index == -1:
+ raise Exception(
+ "=== BEGIN JUNIT === not found in forge output, unable to write junit xml"
+ )
+
+ start_index += len(BEGIN_JUNIT)
+ if start_index > len(forge_output):
+ raise Exception(
+ "=== BEGIN JUNIT === found at end of forge output, unable to write junit xml"
+ )
+
+ end_index = forge_output.find(END_JUNIT)
+ if end_index == -1:
+ raise Exception(
+ "=== END JUNIT === not found in forge output, unable to write junit xml"
+ )
+
+ return forge_output[start_index:end_index].strip()
+
+
class ForgeRunner:
def run(self, context: ForgeContext) -> ForgeResult:
raise NotImplementedError
@@ -840,6 +868,7 @@ def run(self, context: ForgeContext) -> ForgeResult:
FORGE_TEST_SUITE=sanitize_k8s_resource_name(context.forge_test_suite),
FORGE_USERNAME=sanitize_k8s_resource_name(context.forge_username),
FORGE_RETAIN_DEBUG_LOGS=context.forge_retain_debug_logs,
+ FORGE_JUNIT_XML_PATH=context.forge_junit_xml_path,
VALIDATOR_NODE_SELECTOR=validator_node_selector,
KUBECONFIG=MULTIREGION_KUBECONFIG_PATH,
MULTIREGION_KUBECONFIG_DIR=MULTIREGION_KUBECONFIG_DIR,
@@ -1340,10 +1369,11 @@ def seeded_random_choice(namespace: str, cluster_names: Sequence[str]) -> str:
@envoption("FORGE_DEPLOYER_PROFILE")
@envoption("FORGE_ENABLE_FAILPOINTS")
@envoption("FORGE_ENABLE_PERFORMANCE")
-@envoption("FORGE_TEST_SUITE")
@envoption("FORGE_RUNNER_DURATION_SECS", "300")
@envoption("FORGE_IMAGE_TAG")
@envoption("FORGE_RETAIN_DEBUG_LOGS", "false")
+@envoption("FORGE_JUNIT_XML_PATH")
+@envoption("FORGE_TEST_SUITE")
@envoption("IMAGE_TAG")
@envoption("UPGRADE_IMAGE_TAG")
@envoption("FORGE_NAMESPACE")
@@ -1389,6 +1419,7 @@ def test(
forge_runner_duration_secs: str,
forge_image_tag: Optional[str],
forge_retain_debug_logs: str,
+ forge_junit_xml_path: Optional[str],
image_tag: Optional[str],
upgrade_image_tag: Optional[str],
forge_namespace: Optional[str],
@@ -1639,6 +1670,7 @@ def test(
forge_test_suite=forge_test_suite,
forge_username=forge_username,
forge_retain_debug_logs=forge_retain_debug_logs,
+ forge_junit_xml_path=forge_junit_xml_path,
forge_blocking=forge_blocking == "true",
github_actions=github_actions,
github_job_url=(
@@ -1683,6 +1715,9 @@ def test(
log.info(format_comment(forge_context, result))
if github_step_summary:
outputs.append(ForgeFormatter(github_step_summary, format_comment))
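+ # Also write the junit xml extracted from forge output to the configured path for upload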
+ if forge_junit_xml_path:
+ outputs.append(ForgeFormatter(forge_junit_xml_path, format_junit_xml))
+
forge_context.report(result, outputs)
log.info(result.format(forge_context))
diff --git a/testsuite/forge/Cargo.toml b/testsuite/forge/Cargo.toml
index 9b877474df562..2755feb131130 100644
--- a/testsuite/forge/Cargo.toml
+++ b/testsuite/forge/Cargo.toml
@@ -50,17 +50,20 @@ kube = { version = "0.65.0", default-features = false, features = ["jsonpatch",
num_cpus = { workspace = true }
once_cell = { workspace = true }
prometheus-http-query = { workspace = true }
+quick-junit = { workspace = true }
rand = { workspace = true }
regex = { workspace = true }
reqwest = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
+sugars = { workspace = true }
tempfile = { workspace = true }
termcolor = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
url = { workspace = true }
+uuid = { workspace = true }
[dev-dependencies]
serde_merge = { workspace = true }
diff --git a/testsuite/forge/src/config.rs b/testsuite/forge/src/config.rs
new file mode 100644
index 0000000000000..940589e7fb3b1
--- /dev/null
+++ b/testsuite/forge/src/config.rs
@@ -0,0 +1,342 @@
+// Copyright © Aptos Foundation
+// Parts of the project are originally copyright © Meta Platforms, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{
+ success_criteria::{MetricsThreshold, SuccessCriteria, SystemMetricsThreshold},
+ *,
+};
+use aptos_config::config::{NodeConfig, OverrideNodeConfig};
+use aptos_framework::ReleaseBundle;
+use std::{num::NonZeroUsize, sync::Arc};
+
+pub struct ForgeConfig {
+ suite_name: Option<String>,
+
+ pub aptos_tests: Vec<Box<dyn AptosTest>>,
+ pub admin_tests: Vec<Box<dyn AdminTest>>,
+ pub network_tests: Vec<Box<dyn NetworkTest>>,
+
+ /// The initial number of validators to spawn when the test harness creates a swarm
+ pub initial_validator_count: NonZeroUsize,
+
+ /// The initial number of fullnodes to spawn when the test harness creates a swarm
+ pub initial_fullnode_count: usize,
+
+ /// The initial version to use when the test harness creates a swarm
+ pub initial_version: InitialVersion,
+
+ /// The initial genesis modules to use when starting a network
+ pub genesis_config: Option<GenesisConfig>,
+
+ /// Optional genesis helm values init function
+ pub genesis_helm_config_fn: Option<GenesisConfigFn>,
+
+ /// Optional validator node config override function
+ pub validator_override_node_config_fn: Option<OverrideNodeConfigFn>,
+
+ /// Optional fullnode node config override function
+ pub fullnode_override_node_config_fn: Option<OverrideNodeConfigFn>,
+
+ pub multi_region_config: bool,
+
+ /// Transaction workload to run on the swarm
+ pub emit_job_request: EmitJobRequest,
+
+ /// Success criteria
+ pub success_criteria: SuccessCriteria,
+
+ /// The label of existing DBs to use, if None, will create new db.
+ pub existing_db_tag: Option<String>,
+
+ pub validator_resource_override: NodeResourceOverride,
+
+ pub fullnode_resource_override: NodeResourceOverride,
+
+ /// Retain debug logs and above for all nodes instead of just the first 5 nodes
+ pub retain_debug_logs: bool,
+}
+
+impl ForgeConfig {
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ pub fn add_aptos_test<T: AptosTest + 'static>(mut self, aptos_test: T) -> Self {
+ self.aptos_tests.push(Box::new(aptos_test));
+ self
+ }
+
+ pub fn get_suite_name(&self) -> Option<String> {
+ self.suite_name.clone()
+ }
+
+ pub fn with_suite_name(mut self, suite_name: String) -> Self {
+ self.suite_name = Some(suite_name);
+ self
+ }
+
+ pub fn with_aptos_tests(mut self, aptos_tests: Vec<Box<dyn AptosTest>>) -> Self {
+ self.aptos_tests = aptos_tests;
+ self
+ }
+
+ pub fn add_admin_test<T: AdminTest + 'static>(mut self, admin_test: T) -> Self {
+ self.admin_tests.push(Box::new(admin_test));
+ self
+ }
+
+ pub fn with_admin_tests(mut self, admin_tests: Vec<Box<dyn AdminTest>>) -> Self {
+ self.admin_tests = admin_tests;
+ self
+ }
+
+ pub fn add_network_test<T: NetworkTest + 'static>(mut self, network_test: T) -> Self {
+ self.network_tests.push(Box::new(network_test));
+ self
+ }
+
+ pub fn with_network_tests(mut self, network_tests: Vec<Box<dyn NetworkTest>>) -> Self {
+ self.network_tests = network_tests;
+ self
+ }
+
+ pub fn with_initial_validator_count(mut self, initial_validator_count: NonZeroUsize) -> Self {
+ self.initial_validator_count = initial_validator_count;
+ self
+ }
+
+ pub fn with_initial_fullnode_count(mut self, initial_fullnode_count: usize) -> Self {
+ self.initial_fullnode_count = initial_fullnode_count;
+ self
+ }
+
+ pub fn with_genesis_helm_config_fn(mut self, genesis_helm_config_fn: GenesisConfigFn) -> Self {
+ self.genesis_helm_config_fn = Some(genesis_helm_config_fn);
+ self
+ }
+
+ pub fn with_validator_override_node_config_fn(mut self, f: OverrideNodeConfigFn) -> Self {
+ self.validator_override_node_config_fn = Some(f);
+ self
+ }
+
+ pub fn with_fullnode_override_node_config_fn(mut self, f: OverrideNodeConfigFn) -> Self {
+ self.fullnode_override_node_config_fn = Some(f);
+ self
+ }
+
+ pub fn with_multi_region_config(mut self) -> Self {
+ self.multi_region_config = true;
+ self
+ }
+
+ pub fn with_validator_resource_override(
+ mut self,
+ resource_override: NodeResourceOverride,
+ ) -> Self {
+ self.validator_resource_override = resource_override;
+ self
+ }
+
+ pub fn with_fullnode_resource_override(
+ mut self,
+ resource_override: NodeResourceOverride,
+ ) -> Self {
+ self.fullnode_resource_override = resource_override;
+ self
+ }
+
+ fn override_node_config_from_fn(config_fn: OverrideNodeConfigFn) -> OverrideNodeConfig {
+ let mut override_config = NodeConfig::default();
+ let mut base_config = NodeConfig::default();
+ config_fn(&mut override_config, &mut base_config);
+ OverrideNodeConfig::new(override_config, base_config)
+ }
+
+ /// Builds a function that can be used to override the default helm values for the validator and fullnode.
+ /// If a configuration is intended to be set for all nodes, set the value in the default helm values file:
+ /// testsuite/forge/src/backend/k8s/helm-values/aptos-node-default-values.yaml
+ pub fn build_node_helm_config_fn(&self, retain_debug_logs: bool) -> Option<NodeConfigFn> {
+ let validator_override_node_config = self
+ .validator_override_node_config_fn
+ .clone()
+ .map(|config_fn| Self::override_node_config_from_fn(config_fn));
+ let fullnode_override_node_config = self
+ .fullnode_override_node_config_fn
+ .clone()
+ .map(|config_fn| Self::override_node_config_from_fn(config_fn));
+ let multi_region_config = self.multi_region_config;
+ let existing_db_tag = self.existing_db_tag.clone();
+ let validator_resource_override = self.validator_resource_override;
+ let fullnode_resource_override = self.fullnode_resource_override;
+
+ // Override specific helm values. See reference: terraform/helm/aptos-node/values.yaml
+ Some(Arc::new(move |helm_values: &mut serde_yaml::Value| {
+ if let Some(override_config) = &validator_override_node_config {
+ helm_values["validator"]["config"] = override_config.get_yaml().unwrap();
+ }
+ if let Some(override_config) = &fullnode_override_node_config {
+ helm_values["fullnode"]["config"] = override_config.get_yaml().unwrap();
+ }
+ if multi_region_config {
+ helm_values["multicluster"]["enabled"] = true.into();
+ // Create headless services for validators and fullnodes.
+ // Note: chaos-mesh will not work with clusterIP services.
+ helm_values["service"]["validator"]["internal"]["type"] = "ClusterIP".into();
+ helm_values["service"]["validator"]["internal"]["headless"] = true.into();
+ helm_values["service"]["fullnode"]["internal"]["type"] = "ClusterIP".into();
+ helm_values["service"]["fullnode"]["internal"]["headless"] = true.into();
+ }
+ if let Some(existing_db_tag) = &existing_db_tag {
+ helm_values["validator"]["storage"]["labels"]["tag"] =
+ existing_db_tag.clone().into();
+ helm_values["fullnode"]["storage"]["labels"]["tag"] =
+ existing_db_tag.clone().into();
+ }
+
+ // validator resource overrides
+ if let Some(cpu_cores) = validator_resource_override.cpu_cores {
+ helm_values["validator"]["resources"]["requests"]["cpu"] = cpu_cores.into();
+ helm_values["validator"]["resources"]["limits"]["cpu"] = cpu_cores.into();
+ }
+ if let Some(memory_gib) = validator_resource_override.memory_gib {
+ helm_values["validator"]["resources"]["requests"]["memory"] =
+ format!("{}Gi", memory_gib).into();
+ helm_values["validator"]["resources"]["limits"]["memory"] =
+ format!("{}Gi", memory_gib).into();
+ }
+ if let Some(storage_gib) = validator_resource_override.storage_gib {
+ helm_values["validator"]["storage"]["size"] = format!("{}Gi", storage_gib).into();
+ }
+ // fullnode resource overrides
+ if let Some(cpu_cores) = fullnode_resource_override.cpu_cores {
+ helm_values["fullnode"]["resources"]["requests"]["cpu"] = cpu_cores.into();
+ helm_values["fullnode"]["resources"]["limits"]["cpu"] = cpu_cores.into();
+ }
+ if let Some(memory_gib) = fullnode_resource_override.memory_gib {
+ helm_values["fullnode"]["resources"]["requests"]["memory"] =
+ format!("{}Gi", memory_gib).into();
+ helm_values["fullnode"]["resources"]["limits"]["memory"] =
+ format!("{}Gi", memory_gib).into();
+ }
+ if let Some(storage_gib) = fullnode_resource_override.storage_gib {
+ helm_values["fullnode"]["storage"]["size"] = format!("{}Gi", storage_gib).into();
+ }
+
+ if retain_debug_logs {
+ helm_values["validator"]["podAnnotations"]["aptos.dev/min-log-level-to-retain"] =
+ serde_yaml::Value::String("debug".to_owned());
+ helm_values["fullnode"]["podAnnotations"]["aptos.dev/min-log-level-to-retain"] =
+ serde_yaml::Value::String("debug".to_owned());
+ helm_values["validator"]["rust_log"] = "debug,hyper=off".into();
+ helm_values["fullnode"]["rust_log"] = "debug,hyper=off".into();
+ }
+ helm_values["validator"]["config"]["storage"]["rocksdb_configs"]
+ ["enable_storage_sharding"] = true.into();
+ helm_values["fullnode"]["config"]["storage"]["rocksdb_configs"]
+ ["enable_storage_sharding"] = true.into();
+ helm_values["validator"]["config"]["indexer_db_config"]["enable_event"] = true.into();
+ helm_values["fullnode"]["config"]["indexer_db_config"]["enable_event"] = true.into();
+ }))
+ }
+
+ pub fn with_initial_version(mut self, initial_version: InitialVersion) -> Self {
+ self.initial_version = initial_version;
+ self
+ }
+
+ pub fn with_genesis_module_bundle(mut self, bundle: ReleaseBundle) -> Self {
+ self.genesis_config = Some(GenesisConfig::Bundle(bundle));
+ self
+ }
+
+ pub fn with_genesis_modules_path(mut self, genesis_modules: String) -> Self {
+ self.genesis_config = Some(GenesisConfig::Path(genesis_modules));
+ self
+ }
+
+ pub fn with_emit_job(mut self, emit_job_request: EmitJobRequest) -> Self {
+ self.emit_job_request = emit_job_request;
+ self
+ }
+
+ pub fn get_emit_job(&self) -> &EmitJobRequest {
+ &self.emit_job_request
+ }
+
+ pub fn with_success_criteria(mut self, success_criteria: SuccessCriteria) -> Self {
+ self.success_criteria = success_criteria;
+ self
+ }
+
+ pub fn get_success_criteria_mut(&mut self) -> &mut SuccessCriteria {
+ &mut self.success_criteria
+ }
+
+ pub fn with_existing_db(mut self, tag: String) -> Self {
+ self.existing_db_tag = Some(tag);
+ self
+ }
+
+ pub fn number_of_tests(&self) -> usize {
+ self.admin_tests.len() + self.network_tests.len() + self.aptos_tests.len()
+ }
+
+ pub fn all_tests(&self) -> Vec<Box<AnyTestRef<'_>>> {
+ self.admin_tests
+ .iter()
+ .map(|t| Box::new(AnyTestRef::Admin(t.as_ref())))
+ .chain(
+ self.network_tests
+ .iter()
+ .map(|t| Box::new(AnyTestRef::Network(t.as_ref()))),
+ )
+ .chain(
+ self.aptos_tests
+ .iter()
+ .map(|t| Box::new(AnyTestRef::Aptos(t.as_ref()))),
+ )
+ .collect()
+ }
+}
+
+impl Default for ForgeConfig {
+ fn default() -> Self {
+ let forge_run_mode = ForgeRunnerMode::try_from_env().unwrap_or(ForgeRunnerMode::K8s);
+ let success_criteria = if forge_run_mode == ForgeRunnerMode::Local {
+ SuccessCriteria::new(600).add_no_restarts()
+ } else {
+ SuccessCriteria::new(3500)
+ .add_no_restarts()
+ .add_system_metrics_threshold(SystemMetricsThreshold::new(
+ // Check that we don't use more than 12 CPU cores for 30% of the time.
+ MetricsThreshold::new(12.0, 30),
+ // Check that we don't use more than 10 GB of memory for 30% of the time.
+ MetricsThreshold::new_gb(10.0, 30),
+ ))
+ };
+ Self {
+ suite_name: None,
+ aptos_tests: vec![],
+ admin_tests: vec![],
+ network_tests: vec![],
+ initial_validator_count: NonZeroUsize::new(1).unwrap(),
+ initial_fullnode_count: 0,
+ initial_version: InitialVersion::Oldest,
+ genesis_config: None,
+ genesis_helm_config_fn: None,
+ validator_override_node_config_fn: None,
+ fullnode_override_node_config_fn: None,
+ multi_region_config: false,
+ emit_job_request: EmitJobRequest::default().mode(EmitJobMode::MaxLoad {
+ mempool_backlog: 40000,
+ }),
+ success_criteria,
+ existing_db_tag: None,
+ validator_resource_override: NodeResourceOverride::default(),
+ fullnode_resource_override: NodeResourceOverride::default(),
+ retain_debug_logs: false,
+ }
+ }
+}
diff --git a/testsuite/forge/src/interface/test.rs b/testsuite/forge/src/interface/test.rs
index 72c78e6a64514..d3f6c9244b89c 100644
--- a/testsuite/forge/src/interface/test.rs
+++ b/testsuite/forge/src/interface/test.rs
@@ -3,6 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
use rand::SeedableRng;
+use std::borrow::Cow;
/// Whether a test is expected to fail or not
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
@@ -12,6 +13,22 @@ pub enum ShouldFail {
YesWithMessage(&'static str),
}
+#[derive(Debug, Clone)]
+pub struct TestDetails {
+ pub name: String,
+ pub reporting_name: String,
+}
+
+impl TestDetails {
+ pub fn name(&self) -> String {
+ self.name.clone()
+ }
+
+ pub fn reporting_name(&self) -> String {
+ self.reporting_name.clone()
+ }
+}
+
/// Represents a Test in Forge
///
/// This is meant to be a super trait of the other test interfaces.
@@ -28,6 +45,18 @@ pub trait Test: Send + Sync {
fn should_fail(&self) -> ShouldFail {
ShouldFail::No
}
+
+ /// Name used specifically for external reporting
+ fn reporting_name(&self) -> Cow<'static, str> {
+ Cow::Borrowed(self.name())
+ }
+
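+ /// Snapshot of the display and reporting names, handed to test observers.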
+ fn details(&self) -> TestDetails {
+ TestDetails {
+ name: self.name().to_string(),
+ reporting_name: self.reporting_name().to_string(),
+ }
+ }
}
impl<T: Test> Test for &T {
diff --git a/testsuite/forge/src/lib.rs b/testsuite/forge/src/lib.rs
index bdd8ec3cc6eeb..3c8dffb773d1b 100644
--- a/testsuite/forge/src/lib.rs
+++ b/testsuite/forge/src/lib.rs
@@ -9,6 +9,7 @@ pub use anyhow::Result;
mod interface;
pub use interface::*;
+pub mod observer;
mod runner;
pub use runner::*;
@@ -19,6 +20,7 @@ pub use backend::*;
mod report;
pub use report::*;
+pub mod result;
mod github;
pub use github::*;
@@ -29,3 +31,6 @@ pub use slack::*;
pub mod success_criteria;
pub mod test_utils;
+
+pub mod config;
+pub use config::ForgeConfig;
diff --git a/testsuite/forge/src/observer/junit.rs b/testsuite/forge/src/observer/junit.rs
new file mode 100644
index 0000000000000..30ddce90db671
--- /dev/null
+++ b/testsuite/forge/src/observer/junit.rs
@@ -0,0 +1,79 @@
+// Copyright © Aptos Foundation
+// Parts of the project are originally copyright © Meta Platforms, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{
+ result::{TestObserver, TestResult},
+ TestDetails,
+};
+use anyhow::Result;
+use quick_junit::{NonSuccessKind, Report, TestCase, TestSuite};
+use std::sync::Mutex;
+use uuid::Uuid;
+
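+/// Test observer that buffers per-test results and writes them out as a junit xml report.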
+pub struct JunitTestObserver {
+ name: String,
+ path: String,
+ results: Mutex<Vec<(String, TestResult)>>,
+}
+
+impl JunitTestObserver {
+ pub fn new(name: String, path: String) -> Self {
+ Self {
+ name,
+ path,
+ results: Mutex::new(vec![]),
+ }
+ }
+}
+
+impl TestObserver for JunitTestObserver {
+ fn name(&self) -> String {
+ format!("{} junit observer", self.name)
+ }
+
+ fn handle_result(&self, details: &TestDetails, result: &TestResult) -> Result<()> {
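+ // Buffer the result; the junit report itself is only assembled in finish().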
+ self.results
+ .lock()
+ .unwrap()
+ .push((details.reporting_name(), result.clone()));
+ Ok(())
+ }
+
+ fn finish(&self) -> Result<()> {
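+ // Build a single-suite junit report from the buffered results.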
+ let mut report = Report::new("forge");
+ let uuid = Uuid::new_v4();
+ report.set_uuid(uuid);
+
+ let mut suite = TestSuite::new(self.name.clone());
+ for (test_name, result) in self.results.lock().unwrap().iter() {
+ let status = match result {
+ TestResult::Ok => quick_junit::TestCaseStatus::success(),
+ TestResult::FailedWithMsg(msg) => {
+ // Not 100% sure what the difference between failure and error is.
+ let mut status =
+ quick_junit::TestCaseStatus::non_success(NonSuccessKind::Failure);
+ status.set_message(msg.clone());
+ status
+ },
+ };
+
+ let test_case = TestCase::new(test_name.clone(), status);
+ suite.add_test_case(test_case);
+ }
+
+ report.add_test_suite(suite);
+
+ // Write to stdout so github test runner can parse it easily
+ println!("=== BEGIN JUNIT ===");
+ let stdout = std::io::stdout();
+ report.serialize(stdout)?;
+ println!("=== END JUNIT ===");
+
+ // Also write to the file
+ let writer = std::fs::File::create(&self.path)?;
+ report.serialize(writer)?;
+
+ Ok(())
+ }
+}
diff --git a/testsuite/forge/src/observer/mod.rs b/testsuite/forge/src/observer/mod.rs
new file mode 100644
index 0000000000000..e5948202b9e2d
--- /dev/null
+++ b/testsuite/forge/src/observer/mod.rs
@@ -0,0 +1,5 @@
+// Copyright © Aptos Foundation
+// Parts of the project are originally copyright © Meta Platforms, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod junit;
diff --git a/testsuite/forge/src/result.rs b/testsuite/forge/src/result.rs
new file mode 100644
index 0000000000000..0c96d2d1f1d19
--- /dev/null
+++ b/testsuite/forge/src/result.rs
@@ -0,0 +1,159 @@
+// Copyright © Aptos Foundation
+// Parts of the project are originally copyright © Meta Platforms, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::TestDetails;
+use anyhow::{bail, Result};
+use std::{
+ fmt::{Display, Formatter},
+ io::{self, Write as _},
+};
+use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
+
+#[derive(Debug, Clone)]
+pub enum TestResult {
+ Ok,
+ FailedWithMsg(String),
+}
+
+impl Display for TestResult {
+ fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
+ match self {
+ TestResult::Ok => write!(f, "Test Ok"),
+ TestResult::FailedWithMsg(msg) => write!(f, "Test Failed: {}", msg),
+ }
+ }
+}
+
+pub trait TestObserver {
+ fn name(&self) -> String;
+ fn handle_result(&self, details: &TestDetails, result: &TestResult) -> Result<()>;
+ fn finish(&self) -> Result<()>;
+}
+
+pub struct TestSummary {
+ stdout: StandardStream,
+ total: usize,
+ filtered_out: usize,
+ passed: usize,
+ failed: Vec<String>,
+ observers: Vec<Box<dyn TestObserver>>,
+}
+
+impl TestSummary {
+ pub fn new(total: usize, filtered_out: usize) -> Self {
+ Self {
+ stdout: StandardStream::stdout(ColorChoice::Auto),
+ total,
+ filtered_out,
+ passed: 0,
+ failed: Vec::new(),
+ observers: Vec::new(),
+ }
+ }
+
+ pub fn add_observer(&mut self, observer: Box<dyn TestObserver>) {
+ self.observers.push(observer);
+ }
+
+ pub fn handle_result(&mut self, details: TestDetails, result: TestResult) -> Result<()> {
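+ // Print the per-test status line, then forward the result to every registered observer.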
+ write!(self.stdout, "test {} ... ", details.name())?;
+ match result.clone() {
+ TestResult::Ok => {
+ self.passed += 1;
+ self.write_ok()?;
+ },
+ TestResult::FailedWithMsg(msg) => {
+ self.failed.push(details.name());
+ self.write_failed()?;
+ writeln!(self.stdout)?;
+
+ write!(self.stdout, "Error: {}", msg)?;
+ },
+ }
+ writeln!(self.stdout)?;
+ let mut errors = vec![];
+ for observer in &self.observers {
+ let result = observer.handle_result(&details, &result);
+ if let Err(e) = result {
+ errors.push(format!("{}: {}", observer.name(), e));
+ }
+ }
+ if !errors.is_empty() {
+ bail!("Failed to handle_result in observers: {:?}", errors);
+ }
+ Ok(())
+ }
+
+ pub fn finish(&self) -> Result<()> {
+ let mut errors = vec![];
+ for observer in &self.observers {
+ let result = observer.finish();
+ if let Err(e) = result {
+ errors.push(format!("{}: {}", observer.name(), e));
+ }
+ }
+ if !errors.is_empty() {
+ bail!("Failed to finish observers: {:?}", errors);
+ }
+ Ok(())
+ }
+
+ fn write_ok(&mut self) -> io::Result<()> {
+ self.stdout
+ .set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
+ write!(self.stdout, "ok")?;
+ self.stdout.reset()?;
+ Ok(())
+ }
+
+ fn write_failed(&mut self) -> io::Result<()> {
+ self.stdout
+ .set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
+ write!(self.stdout, "FAILED")?;
+ self.stdout.reset()?;
+ Ok(())
+ }
+
+ pub fn write_starting_msg(&mut self) -> io::Result<()> {
+ writeln!(self.stdout)?;
+ writeln!(
+ self.stdout,
+ "running {} tests",
+ self.total - self.filtered_out
+ )?;
+ Ok(())
+ }
+
+ pub fn write_summary(&mut self) -> io::Result<()> {
+ // Print out the failing tests
+ if !self.failed.is_empty() {
+ writeln!(self.stdout)?;
+ writeln!(self.stdout, "failures:")?;
+ for name in &self.failed {
+ writeln!(self.stdout, " {}", name)?;
+ }
+ }
+
+ writeln!(self.stdout)?;
+ write!(self.stdout, "test result: ")?;
+ if self.failed.is_empty() {
+ self.write_ok()?;
+ } else {
+ self.write_failed()?;
+ }
+ writeln!(
+ self.stdout,
+ ". {} passed; {} failed; {} filtered out",
+ self.passed,
+ self.failed.len(),
+ self.filtered_out
+ )?;
+ writeln!(self.stdout)?;
+ Ok(())
+ }
+
+ pub fn success(&self) -> bool {
+ self.failed.is_empty()
+ }
+}
diff --git a/testsuite/forge/src/runner.rs b/testsuite/forge/src/runner.rs
index 73e0262708f9e..5545f9ef2939b 100644
--- a/testsuite/forge/src/runner.rs
+++ b/testsuite/forge/src/runner.rs
@@ -4,16 +4,18 @@
// TODO going to remove random seed once cluster deployment supports re-run genesis
use crate::{
- success_criteria::{MetricsThreshold, SuccessCriteria, SystemMetricsThreshold},
- *,
+ config::ForgeConfig,
+ observer::junit::JunitTestObserver,
+ result::{TestResult, TestSummary},
+ AdminContext, AdminTest, AptosContext, AptosTest, CoreContext, Factory, NetworkContext,
+ NetworkContextSynchronizer, NetworkTest, ShouldFail, Test, TestReport, Version,
+ NAMESPACE_CLEANUP_DURATION_BUFFER_SECS,
};
use anyhow::{bail, format_err, Error, Result};
-use aptos_config::config::{NodeConfig, OverrideNodeConfig};
-use aptos_framework::ReleaseBundle;
+use aptos_config::config::NodeConfig;
use clap::{Parser, ValueEnum};
use rand::{rngs::OsRng, Rng, SeedableRng};
use std::{
- fmt::{Display, Formatter},
io::{self, Write},
num::NonZeroUsize,
process,
@@ -21,7 +23,7 @@ use std::{
sync::Arc,
time::Duration,
};
-use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
+use sugars::boxed;
use tokio::runtime::Runtime;
const KUBERNETES_SERVICE_HOST: &str = "KUBERNETES_SERVICE_HOST";
@@ -78,6 +80,9 @@ pub struct Options {
/// Retain debug logs and above for all nodes instead of just the first 5 nodes
#[clap(long, default_value = "false", env = "FORGE_RETAIN_DEBUG_LOGS")]
retain_debug_logs: bool,
+ /// Optional path to write junit xml test report
+ #[clap(long, env = "FORGE_JUNIT_XML_PATH")]
+ junit_xml_path: Option<String>,
}
impl Options {
@@ -130,286 +135,6 @@ pub struct NodeResourceOverride {
pub storage_gib: Option<usize>,
}
-pub struct ForgeConfig {
- aptos_tests: Vec<Box<dyn AptosTest>>,
- admin_tests: Vec<Box<dyn AdminTest>>,
- network_tests: Vec<Box<dyn NetworkTest>>,
-
- /// The initial number of validators to spawn when the test harness creates a swarm
- initial_validator_count: NonZeroUsize,
-
- /// The initial number of fullnodes to spawn when the test harness creates a swarm
- initial_fullnode_count: usize,
-
- /// The initial version to use when the test harness creates a swarm
- initial_version: InitialVersion,
-
- /// The initial genesis modules to use when starting a network
- genesis_config: Option<GenesisConfig>,
-
- /// Optional genesis helm values init function
- genesis_helm_config_fn: Option<GenesisConfigFn>,
-
- /// Optional validator node config override function
- validator_override_node_config_fn: Option<OverrideNodeConfigFn>,
-
- /// Optional fullnode node config override function
- fullnode_override_node_config_fn: Option<OverrideNodeConfigFn>,
-
- multi_region_config: bool,
-
- /// Transaction workload to run on the swarm
- emit_job_request: EmitJobRequest,
-
- /// Success criteria
- success_criteria: SuccessCriteria,
-
- /// The label of existing DBs to use, if None, will create new db.
- existing_db_tag: Option<String>,
-
- validator_resource_override: NodeResourceOverride,
-
- fullnode_resource_override: NodeResourceOverride,
-
- /// Retain debug logs and above for all nodes instead of just the first 5 nodes
- retain_debug_logs: bool,
-}
-
-impl ForgeConfig {
- pub fn new() -> Self {
- Self::default()
- }
-
- pub fn add_aptos_test<T: AptosTest + 'static>(mut self, aptos_test: T) -> Self {
- self.aptos_tests.push(Box::new(aptos_test));
- self
- }
-
- pub fn with_aptos_tests(mut self, aptos_tests: Vec<Box<dyn AptosTest>>) -> Self {
- self.aptos_tests = aptos_tests;
- self
- }
-
- pub fn add_admin_test<T: AdminTest + 'static>(mut self, admin_test: T) -> Self {
- self.admin_tests.push(Box::new(admin_test));
- self
- }
-
- pub fn with_admin_tests(mut self, admin_tests: Vec<Box<dyn AdminTest>>) -> Self {
- self.admin_tests = admin_tests;
- self
- }
-
- pub fn add_network_test<T: NetworkTest + 'static>(mut self, network_test: T) -> Self {
- self.network_tests.push(Box::new(network_test));
- self
- }
-
- pub fn with_network_tests(mut self, network_tests: Vec<Box<dyn NetworkTest>>) -> Self {
- self.network_tests = network_tests;
- self
- }
-
- pub fn with_initial_validator_count(mut self, initial_validator_count: NonZeroUsize) -> Self {
- self.initial_validator_count = initial_validator_count;
- self
- }
-
- pub fn with_initial_fullnode_count(mut self, initial_fullnode_count: usize) -> Self {
- self.initial_fullnode_count = initial_fullnode_count;
- self
- }
-
- pub fn with_genesis_helm_config_fn(mut self, genesis_helm_config_fn: GenesisConfigFn) -> Self {
- self.genesis_helm_config_fn = Some(genesis_helm_config_fn);
- self
- }
-
- pub fn with_validator_override_node_config_fn(mut self, f: OverrideNodeConfigFn) -> Self {
- self.validator_override_node_config_fn = Some(f);
- self
- }
-
- pub fn with_fullnode_override_node_config_fn(mut self, f: OverrideNodeConfigFn) -> Self {
- self.fullnode_override_node_config_fn = Some(f);
- self
- }
-
- pub fn with_multi_region_config(mut self) -> Self {
- self.multi_region_config = true;
- self
- }
-
- pub fn with_validator_resource_override(
- mut self,
- resource_override: NodeResourceOverride,
- ) -> Self {
- self.validator_resource_override = resource_override;
- self
- }
-
- pub fn with_fullnode_resource_override(
- mut self,
- resource_override: NodeResourceOverride,
- ) -> Self {
- self.fullnode_resource_override = resource_override;
- self
- }
-
- fn override_node_config_from_fn(config_fn: OverrideNodeConfigFn) -> OverrideNodeConfig {
- let mut override_config = NodeConfig::default();
- let mut base_config = NodeConfig::default();
- config_fn(&mut override_config, &mut base_config);
- OverrideNodeConfig::new(override_config, base_config)
- }
-
- /// Builds a function that can be used to override the default helm values for the validator and fullnode.
- /// If a configuration is intended to be set for all nodes, set the value in the default helm values file:
- /// testsuite/forge/src/backend/k8s/helm-values/aptos-node-default-values.yaml
- pub fn build_node_helm_config_fn(&self, retain_debug_logs: bool) -> Option<NodeConfigFn> {
- let validator_override_node_config = self
- .validator_override_node_config_fn
- .clone()
- .map(|config_fn| Self::override_node_config_from_fn(config_fn));
- let fullnode_override_node_config = self
- .fullnode_override_node_config_fn
- .clone()
- .map(|config_fn| Self::override_node_config_from_fn(config_fn));
- let multi_region_config = self.multi_region_config;
- let existing_db_tag = self.existing_db_tag.clone();
- let validator_resource_override = self.validator_resource_override;
- let fullnode_resource_override = self.fullnode_resource_override;
-
- // Override specific helm values. See reference: terraform/helm/aptos-node/values.yaml
- Some(Arc::new(move |helm_values: &mut serde_yaml::Value| {
- if let Some(override_config) = &validator_override_node_config {
- helm_values["validator"]["config"] = override_config.get_yaml().unwrap();
- }
- if let Some(override_config) = &fullnode_override_node_config {
- helm_values["fullnode"]["config"] = override_config.get_yaml().unwrap();
- }
- if multi_region_config {
- helm_values["multicluster"]["enabled"] = true.into();
- // Create headless services for validators and fullnodes.
- // Note: chaos-mesh will not work with clusterIP services.
- helm_values["service"]["validator"]["internal"]["type"] = "ClusterIP".into();
- helm_values["service"]["validator"]["internal"]["headless"] = true.into();
- helm_values["service"]["fullnode"]["internal"]["type"] = "ClusterIP".into();
- helm_values["service"]["fullnode"]["internal"]["headless"] = true.into();
- }
- if let Some(existing_db_tag) = &existing_db_tag {
- helm_values["validator"]["storage"]["labels"]["tag"] =
- existing_db_tag.clone().into();
- helm_values["fullnode"]["storage"]["labels"]["tag"] =
- existing_db_tag.clone().into();
- }
-
- // validator resource overrides
- if let Some(cpu_cores) = validator_resource_override.cpu_cores {
- helm_values["validator"]["resources"]["requests"]["cpu"] = cpu_cores.into();
- helm_values["validator"]["resources"]["limits"]["cpu"] = cpu_cores.into();
- }
- if let Some(memory_gib) = validator_resource_override.memory_gib {
- helm_values["validator"]["resources"]["requests"]["memory"] =
- format!("{}Gi", memory_gib).into();
- helm_values["validator"]["resources"]["limits"]["memory"] =
- format!("{}Gi", memory_gib).into();
- }
- if let Some(storage_gib) = validator_resource_override.storage_gib {
- helm_values["validator"]["storage"]["size"] = format!("{}Gi", storage_gib).into();
- }
- // fullnode resource overrides
- if let Some(cpu_cores) = fullnode_resource_override.cpu_cores {
- helm_values["fullnode"]["resources"]["requests"]["cpu"] = cpu_cores.into();
- helm_values["fullnode"]["resources"]["limits"]["cpu"] = cpu_cores.into();
- }
- if let Some(memory_gib) = fullnode_resource_override.memory_gib {
- helm_values["fullnode"]["resources"]["requests"]["memory"] =
- format!("{}Gi", memory_gib).into();
- helm_values["fullnode"]["resources"]["limits"]["memory"] =
- format!("{}Gi", memory_gib).into();
- }
- if let Some(storage_gib) = fullnode_resource_override.storage_gib {
- helm_values["fullnode"]["storage"]["size"] = format!("{}Gi", storage_gib).into();
- }
-
- if retain_debug_logs {
- helm_values["validator"]["podAnnotations"]["aptos.dev/min-log-level-to-retain"] =
- serde_yaml::Value::String("debug".to_owned());
- helm_values["fullnode"]["podAnnotations"]["aptos.dev/min-log-level-to-retain"] =
- serde_yaml::Value::String("debug".to_owned());
- helm_values["validator"]["rust_log"] = "debug,hyper=off".into();
- helm_values["fullnode"]["rust_log"] = "debug,hyper=off".into();
- }
- helm_values["validator"]["config"]["storage"]["rocksdb_configs"]
- ["enable_storage_sharding"] = true.into();
- helm_values["fullnode"]["config"]["storage"]["rocksdb_configs"]
- ["enable_storage_sharding"] = true.into();
- helm_values["validator"]["config"]["indexer_db_config"]["enable_event"] = true.into();
- helm_values["fullnode"]["config"]["indexer_db_config"]["enable_event"] = true.into();
- }))
- }
-
- pub fn with_initial_version(mut self, initial_version: InitialVersion) -> Self {
- self.initial_version = initial_version;
- self
- }
-
- pub fn with_genesis_module_bundle(mut self, bundle: ReleaseBundle) -> Self {
- self.genesis_config = Some(GenesisConfig::Bundle(bundle));
- self
- }
-
- pub fn with_genesis_modules_path(mut self, genesis_modules: String) -> Self {
- self.genesis_config = Some(GenesisConfig::Path(genesis_modules));
- self
- }
-
- pub fn with_emit_job(mut self, emit_job_request: EmitJobRequest) -> Self {
- self.emit_job_request = emit_job_request;
- self
- }
-
- pub fn get_emit_job(&self) -> &EmitJobRequest {
- &self.emit_job_request
- }
-
- pub fn with_success_criteria(mut self, success_criteria: SuccessCriteria) -> Self {
- self.success_criteria = success_criteria;
- self
- }
-
- pub fn get_success_criteria_mut(&mut self) -> &mut SuccessCriteria {
- &mut self.success_criteria
- }
-
- pub fn with_existing_db(mut self, tag: String) -> Self {
- self.existing_db_tag = Some(tag);
- self
- }
-
- pub fn number_of_tests(&self) -> usize {
- self.admin_tests.len() + self.network_tests.len() + self.aptos_tests.len()
- }
-
- pub fn all_tests(&self) -> Vec<Box<AnyTestRef<'_>>> {
- self.admin_tests
- .iter()
- .map(|t| Box::new(AnyTestRef::Admin(t.as_ref())))
- .chain(
- self.network_tests
- .iter()
- .map(|t| Box::new(AnyTestRef::Network(t.as_ref()))),
- )
- .chain(
- self.aptos_tests
- .iter()
- .map(|t| Box::new(AnyTestRef::Aptos(t.as_ref()))),
- )
- .collect()
- }
-}
-
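For orientation, a minimal sketch (not part of this diff) of how the builder methods removed above are typically chained when assembling a suite; `MyNetworkTest` is a hypothetical type implementing `NetworkTest`, and the counts and criteria are illustrative only. Since `ForgeConfig` is still referenced further down (e.g. `tests: ForgeConfig`), the type presumably moves to another module rather than disappearing.

fn example_suite() -> ForgeConfig {
    ForgeConfig::default()
        .with_initial_validator_count(NonZeroUsize::new(5).unwrap())
        .with_initial_fullnode_count(2)
        .add_network_test(MyNetworkTest) // hypothetical NetworkTest impl, for illustration
        .with_success_criteria(SuccessCriteria::new(1000).add_no_restarts())
}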
// Workaround way to implement all_tests, for:
// error[E0658]: cannot cast `dyn interface::admin::AdminTest` to `dyn interface::test::Test`, trait upcasting coercion is experimental
pub enum AnyTestRef<'a> {
@@ -474,45 +199,6 @@ impl ForgeRunnerMode {
}
}
-impl Default for ForgeConfig {
- fn default() -> Self {
- let forge_run_mode = ForgeRunnerMode::try_from_env().unwrap_or(ForgeRunnerMode::K8s);
- let success_criteria = if forge_run_mode == ForgeRunnerMode::Local {
- SuccessCriteria::new(600).add_no_restarts()
- } else {
- SuccessCriteria::new(3500)
- .add_no_restarts()
- .add_system_metrics_threshold(SystemMetricsThreshold::new(
- // Check that we don't use more than 12 CPU cores for 30% of the time.
- MetricsThreshold::new(12.0, 30),
- // Check that we don't use more than 10 GB of memory for 30% of the time.
- MetricsThreshold::new_gb(10.0, 30),
- ))
- };
- Self {
- aptos_tests: vec![],
- admin_tests: vec![],
- network_tests: vec![],
- initial_validator_count: NonZeroUsize::new(1).unwrap(),
- initial_fullnode_count: 0,
- initial_version: InitialVersion::Oldest,
- genesis_config: None,
- genesis_helm_config_fn: None,
- validator_override_node_config_fn: None,
- fullnode_override_node_config_fn: None,
- multi_region_config: false,
- emit_job_request: EmitJobRequest::default().mode(EmitJobMode::MaxLoad {
- mempool_backlog: 40000,
- }),
- success_criteria,
- existing_db_tag: None,
- validator_resource_override: NodeResourceOverride::default(),
- fullnode_resource_override: NodeResourceOverride::default(),
- retain_debug_logs: false,
- }
- }
-}
-
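A second sketch (again not part of this diff) of exercising the removed build_node_helm_config_fn in isolation. It assumes the NodeResourceOverride fields shown above are publicly settable and that serde_yaml's IndexMut creates missing nested mappings, which the closure above already relies on:

fn demo_helm_overrides() {
    let config = ForgeConfig::default().with_validator_resource_override(NodeResourceOverride {
        cpu_cores: Some(16),
        memory_gib: Some(32),
        ..NodeResourceOverride::default()
    });
    let apply = config.build_node_helm_config_fn(false).expect("config fn is always Some");
    // Start from an empty mapping; nested keys are created on assignment.
    let mut helm_values = serde_yaml::Value::Mapping(Default::default());
    apply(&mut helm_values);
    // helm_values now carries the validator CPU/memory requests and limits plus the
    // unconditional storage-sharding and indexer-db overrides set by the closure.
    println!("{}", serde_yaml::to_string(&helm_values).unwrap());
}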
pub struct Forge<'cfg, F> {
options: &'cfg Options,
tests: ForgeConfig,
@@ -568,6 +254,15 @@ impl<'cfg, F: Factory> Forge<'cfg, F> {
let mut report = TestReport::new();
let mut summary = TestSummary::new(test_count, filtered_out);
+
+ // Optionally write junit xml test report for external processing
+ if let Some(junit_xml_path) = self.options.junit_xml_path.as_ref() {
+ let junit_observer = JunitTestObserver::new(
+ self.tests.get_suite_name().unwrap_or("local".to_string()),
+ junit_xml_path.to_owned(),
+ );
+ summary.add_observer(boxed!(junit_observer));
+ }
summary.write_starting_msg()?;
if test_count > 0 {
@@ -603,9 +298,9 @@ impl<'cfg, F: Factory> Forge<'cfg, F> {
swarm.chain_info().into_aptos_public_info(),
&mut report,
);
- let result = run_test(|| runtime.block_on(test.run(&mut aptos_ctx)));
+ let result = process_test_result(runtime.block_on(test.run(&mut aptos_ctx)));
report.report_text(result.to_string());
- summary.handle_result(test.name().to_owned(), result)?;
+ summary.handle_result(test.details(), result)?;
}
// Run AdminTests
@@ -615,9 +310,9 @@ impl<'cfg, F: Factory> Forge<'cfg, F> {
swarm.chain_info(),
&mut report,
);
- let result = run_test(|| test.run(&mut admin_ctx));
+ let result = process_test_result(test.run(&mut admin_ctx));
report.report_text(result.to_string());
- summary.handle_result(test.name().to_owned(), result)?;
+ summary.handle_result(test.details(), result)?;
}
let logs_location = swarm.logs_location();
@@ -634,17 +329,18 @@ impl<'cfg, F: Factory> Forge<'cfg, F> {
let handle = network_ctx.runtime.handle().clone();
let _handle_context = handle.enter();
let network_ctx = NetworkContextSynchronizer::new(network_ctx, handle.clone());
- let result = run_test(|| handle.block_on(test.run(network_ctx.clone())));
+ let result = process_test_result(handle.block_on(test.run(network_ctx.clone())));
// explicitly keep network context in scope so that its created tokio Runtime drops after all the stuff has run.
let NetworkContextSynchronizer { ctx, handle } = network_ctx;
drop(handle);
let ctx = Arc::into_inner(ctx).unwrap().into_inner();
drop(ctx);
report.report_text(result.to_string());
- summary.handle_result(test.name().to_owned(), result)?;
+ summary.handle_result(test.details(), result)?;
}
report.print_report();
+ summary.finish()?;
io::stdout().flush()?;
io::stderr().flush()?;
@@ -692,22 +388,8 @@ impl<'cfg, F: Factory> Forge<'cfg, F> {
}
}
-enum TestResult {
- Ok,
- FailedWithMsg(String),
-}
-
-impl Display for TestResult {
- fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
- match self {
- TestResult::Ok => write!(f, "Test Ok"),
- TestResult::FailedWithMsg(msg) => write!(f, "Test Failed: {}", msg),
- }
- }
-}
-
-fn run_test<F: FnOnce() -> Result<()>>(f: F) -> TestResult {
- match f() {
+fn process_test_result(result: Result<()>) -> TestResult {
+ match result {
Ok(()) => TestResult::Ok,
Err(e) => {
let is_triggerd_by_github_actions =
@@ -721,103 +403,6 @@ fn run_test<F: FnOnce() -> Result<()>>(f: F) -> TestResult {
}
}
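With the closure indirection of run_test gone, callers now hand the finished Result straight to process_test_result. A minimal sketch of the mapping, assuming the anyhow-based Result alias used in this crate and that the TestResult variants keep their current names after moving out of this file:

fn demo_results() {
    let passed = process_test_result(Ok(()));
    let failed = process_test_result(Err(anyhow::anyhow!("node crashed during test")));
    // TestResult retains a Display impl; the report above prints it via to_string().
    println!("{} / {}", passed, failed);
}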
-struct TestSummary {
- stdout: StandardStream,
- total: usize,
- filtered_out: usize,
- passed: usize,
- failed: Vec<String>,
-}
-
-impl TestSummary {
- fn new(total: usize, filtered_out: usize) -> Self {
- Self {
- stdout: StandardStream::stdout(ColorChoice::Auto),
- total,
- filtered_out,
- passed: 0,
- failed: Vec::new(),
- }
- }
-
- fn handle_result(&mut self, name: String, result: TestResult) -> io::Result<()> {
- write!(self.stdout, "test {} ... ", name)?;
- match result {
- TestResult::Ok => {
- self.passed += 1;
- self.write_ok()?;
- },
- TestResult::FailedWithMsg(msg) => {
- self.failed.push(name);
- self.write_failed()?;
- writeln!(self.stdout)?;
-
- write!(self.stdout, "Error: {}", msg)?;
- },
- }
- writeln!(self.stdout)?;
- Ok(())
- }
-
- fn write_ok(&mut self) -> io::Result<()> {
- self.stdout
- .set_color(ColorSpec::new().set_fg(Some(Color::Green)))?;
- write!(self.stdout, "ok")?;
- self.stdout.reset()?;
- Ok(())
- }
-
- fn write_failed(&mut self) -> io::Result<()> {
- self.stdout
- .set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
- write!(self.stdout, "FAILED")?;
- self.stdout.reset()?;
- Ok(())
- }
-
- fn write_starting_msg(&mut self) -> io::Result<()> {
- writeln!(self.stdout)?;
- writeln!(
- self.stdout,
- "running {} tests",
- self.total - self.filtered_out
- )?;
- Ok(())
- }
-
- fn write_summary(&mut self) -> io::Result<()> {
- // Print out the failing tests
- if !self.failed.is_empty() {
- writeln!(self.stdout)?;
- writeln!(self.stdout, "failures:")?;
- for name in &self.failed {
- writeln!(self.stdout, " {}", name)?;
- }
- }
-
- writeln!(self.stdout)?;
- write!(self.stdout, "test result: ")?;
- if self.failed.is_empty() {
- self.write_ok()?;
- } else {
- self.write_failed()?;
- }
- writeln!(
- self.stdout,
- ". {} passed; {} failed; {} filtered out",
- self.passed,
- self.failed.len(),
- self.filtered_out
- )?;
- writeln!(self.stdout)?;
- Ok(())
- }
-
- fn success(&self) -> bool {
- self.failed.is_empty()
- }
-}
-
#[cfg(test)]
mod test {
use super::*;
diff --git a/testsuite/forge_test.py b/testsuite/forge_test.py
index 5b6c4b567e674..a337e06fd5109 100644
--- a/testsuite/forge_test.py
+++ b/testsuite/forge_test.py
@@ -1,6 +1,7 @@
from contextlib import ExitStack
import json
import os
+import textwrap
import unittest
import tempfile
from datetime import datetime, timezone, timedelta
@@ -14,6 +15,8 @@
import forge
from forge import (
+ BEGIN_JUNIT,
+ END_JUNIT,
ForgeCluster,
ForgeConfigBackend,
ForgeContext,
@@ -29,6 +32,7 @@
find_recent_images,
find_recent_images_by_profile_or_features,
format_comment,
+ format_junit_xml,
format_pre_comment,
format_report,
get_all_forge_jobs,
@@ -167,6 +171,7 @@ def fake_context(
forge_username="banana-eater",
forge_blocking=True,
forge_retain_debug_logs="true",
+ forge_junit_xml_path=None,
github_actions="false",
github_job_url="https://banana",
)
@@ -661,6 +666,25 @@ def testPossibleAuthFailureMessage(self) -> None:
output = result.format(context)
self.assertFixture(output, "testPossibleAuthFailureMessage.fixture")
+ def testFormatJunitXml(self) -> None:
+ result = ForgeResult.empty()
+ context = fake_context()
+
+ result.set_output(
+ textwrap.dedent(
+ f"""
+ {BEGIN_JUNIT}
+
+ blah
+
+ {END_JUNIT}
+ """
+ )
+ )
+
+ output = format_junit_xml(context, result)
+ self.assertFixture(output, "testFormatJunitXml.fixture")
+
class ForgeMainTests(unittest.TestCase, AssertFixtureMixin):
maxDiff = None
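The new test above feeds output wrapped in BEGIN_JUNIT/END_JUNIT markers to format_junit_xml and fixtures the result, which suggests the helper slices out the payload between the markers. A rough sketch of that kind of slicing (in Rust for consistency with the rest of this diff; the real implementation lives in the forge Python module, and the concrete marker strings are not shown here, so they are passed in as parameters):

fn extract_between<'a>(output: &'a str, begin: &str, end: &str) -> Option<&'a str> {
    // Locate the payload delimited by the markers and trim surrounding whitespace.
    let start = output.find(begin)? + begin.len();
    let stop = output[start..].find(end)? + start;
    Some(output[start..stop].trim())
}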
diff --git a/testsuite/testcases/src/lib.rs b/testsuite/testcases/src/lib.rs
index 92320a3136405..3e3bc617c2fd7 100644
--- a/testsuite/testcases/src/lib.rs
+++ b/testsuite/testcases/src/lib.rs
@@ -39,6 +39,7 @@ use async_trait::async_trait;
use futures::future::join_all;
use rand::{rngs::StdRng, SeedableRng};
use std::{
+ borrow::Cow,
fmt::Write,
ops::DerefMut,
sync::Arc,
@@ -644,6 +645,15 @@ impl Test for CompositeNetworkTest {
fn name(&self) -> &'static str {
"CompositeNetworkTest"
}
+
+ fn reporting_name(&self) -> Cow<'static, str> {
+ let mut name_builder = self.test.name().to_owned();
+ for wrapper in self.wrappers.iter() {
+ name_builder = format!("{}({})", wrapper.name(), name_builder);
+ }
+ name_builder = format!("CompositeNetworkTest({}) with ", name_builder);
+ Cow::Owned(name_builder)
+ }
}
pub(crate) fn generate_onchain_config_blob(data: &[u8]) -> String {
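Finally, a small sketch (not part of this diff) mirroring the composition performed by reporting_name above: each wrapper in iteration order wraps the name built so far, so the last wrapper ends up outermost. "CpuChaos", "NetEm", and "Inner" are illustrative names only.

fn compose_reporting_name(inner: &str, wrappers: &[&str]) -> String {
    let mut name = inner.to_owned();
    for wrapper in wrappers {
        // Each wrapper wraps everything accumulated so far.
        name = format!("{}({})", wrapper, name);
    }
    format!("CompositeNetworkTest({}) with ", name)
}

// compose_reporting_name("Inner", &["CpuChaos", "NetEm"])
//   => "CompositeNetworkTest(NetEm(CpuChaos(Inner))) with "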