diff --git a/.github/actions/aws-device-farm-run/action.yml b/.github/actions/aws-device-farm-run/action.yml deleted file mode 100644 index de6157ce9b6..00000000000 --- a/.github/actions/aws-device-farm-run/action.yml +++ /dev/null @@ -1,139 +0,0 @@ -name: 'aws-device-farm-run' -description: 'Run device test on AWS Device Farm' -inputs: - name: - description: 'Name of the test' - required: true - appType: - description: 'Type of upload for app' - required: true - appFile: - description: 'Application file to be uploaded' - required: false - testFile: - description: 'Application file to be uploaded' - required: false - testPackageType: - description: 'e.g. INSTRUMENTATION_TEST_PACKAGE, XCTEST_TEST_PACKAGE' - required: true - testType: - description: 'e.g. INSTRUMENTATION, XCTEST' - required: true - testFilter: - description: 'Filter tests' - required: false - externalData: - description: 'ARN of external data package' - required: false - AWS_ACCESS_KEY_ID: - description: "AWS_ACCESS_KEY_ID" - required: true - AWS_SECRET_ACCESS_KEY: - description: "AWS_SECRET_ACCESS_KEY" - required: true - AWS_ROLE_TO_ASSUME: - description: "AWS_ROLE_TO_ASSUME" - required: true - AWS_DEVICE_FARM_PROJECT_ARN: - description: "AWS_DEVICE_FARM_PROJECT_ARN" - required: true - AWS_DEVICE_FARM_DEVICE_POOL_ARN: - description: "AWS_DEVICE_FARM_DEVICE_POOL_ARN" - required: true - testSpecArn: - description: "ARN of test spec" - required: false -outputs: - runArn: - description: ARN of run - value: ${{ steps.schedule_run.outputs.runArn }} -runs: - using: "composite" - steps: - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-region: us-west-2 - role-to-assume: ${{ inputs.AWS_ROLE_TO_ASSUME }} - role-session-name: ${{ github.run_id }} - role-duration-seconds: 14400 - - - name: Create upload app - shell: bash - run: | - response=$(aws devicefarm create-upload --type ${{ inputs.appType }} --name ${{ inputs.appFile }} --project-arn ${{ inputs.AWS_DEVICE_FARM_PROJECT_ARN }}) - echo "$response" - arn="$(jq -r '.upload.arn' <<< "$response")" - url="$(jq -r '.upload.url' <<< "$response")" - echo "app_arn=$arn" >> "$GITHUB_ENV" - echo "app_url=$url" >> "$GITHUB_ENV" - - - name: Create upload test package - shell: bash - run: | - response=$(aws devicefarm create-upload --type ${{ inputs.testPackageType }} --name ${{ inputs.testFile }} --project-arn ${{ inputs.AWS_DEVICE_FARM_PROJECT_ARN }}) - echo "$response" - arn="$(jq -r '.upload.arn' <<< "$response")" - url="$(jq -r '.upload.url' <<< "$response")" - echo "test_package_arn=$arn" >> "$GITHUB_ENV" - echo "test_package_url=$url" >> "$GITHUB_ENV" - - - name: Upload ${{ inputs.appFile }}, ${{ inputs.testFile }} - shell: bash - run: | - curl -T ${{ inputs.appFile }} "${{ env.app_url }}" - curl -T ${{ inputs.testFile }} "${{ env.test_package_url }}" - - max_checks=10 - sleep_time=5 - - check_status() { - aws devicefarm get-upload --arn "$1" | jq -r '.upload.status' - } - - while ((max_checks--)); do - status_app="$(check_status "${{ env.app_arn }}")" - status_test_package="$(check_status "${{ env.test_package_arn }}")" - - echo status_app="$status_app" - echo status_test_package="$status_test_package" - - if [[ "$status_app" == "SUCCEEDED" && "$status_test_package" == "SUCCEEDED" ]]; then - exit 0 - elif ((max_checks == 0)); then - echo "App or test package failed to upload" - exit 1 - fi - - sleep $sleep_time - done - - - name: Schedule test run - id: schedule_run - shell: bash - run: | - arn="$(aws devicefarm schedule-run \ - 
--project-arn ${{ inputs.AWS_DEVICE_FARM_PROJECT_ARN }} \ - --name "MapLibre Native ${{ inputs.name }}" \ - --app-arn ${{ env.app_arn }} \ - --device-pool-arn ${{ inputs.AWS_DEVICE_FARM_DEVICE_POOL_ARN }} \ - --test type=${{ inputs.testType }},testPackageArn=${{ env.test_package_arn }}${{ inputs.testFilter && ',filter=' }}${{ inputs.testFilter }}${{ inputs.testSpecArn && ',testSpecArn=' }}${{ inputs.testSpecArn }} \ - ${{ inputs.externalData && '--configuration extraDataPackageArn=' }}${{ inputs.externalData }} \ - --execution-configuration videoCapture=false \ - --output text --query "run.arn")" - - echo "runArn=$arn" >> "$GITHUB_OUTPUT" - - # wait until result is not PENDING - # https://awscli.amazonaws.com/v2/documentation/api/latest/reference/devicefarm/get-run.html#output - while true; do - sleep 30 - result="$(aws devicefarm get-run --arn "$arn" --output text --query "run.result")" - case $result in - FAILED|ERRORED|STOPPED) echo "Run $result" && exit 1 ;; - SKIPPED|PASSED) echo "Run $result" && exit 0 ;; - PENDING) continue ;; - *) echo "Unexpected run result $result" && exit 1 ;; - esac - done - diff --git a/.github/workflows/android-device-test.yml b/.github/workflows/android-device-test.yml index a7a22e9476a..9d1c2efbe52 100644 --- a/.github/workflows/android-device-test.yml +++ b/.github/workflows/android-device-test.yml @@ -70,6 +70,10 @@ jobs: steps: - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + - id: parent_workflow run: | conclusion=$(curl ${{ github.event.workflow_run.jobs_url }} | jq -r '.jobs[] | select(.name == "android-build").conclusion') @@ -130,33 +134,25 @@ jobs: aws-region: us-west-2 role-to-assume: ${{ vars.OIDC_AWS_ROLE_TO_ASSUME }} role-session-name: ${{ github.run_id }} - - - name: Upload external data - if: env.run_device_test == 'true' + + - name: Run ${{ matrix.test.name }} on AWS Device Farm run: | - export RESULTS_API=${{ secrets.MLN_RESULTS_API }} - export AWS_DEVICE_FARM_PROJECT_ARN=${{ vars.AWS_DEVICE_FARM_PROJECT_ARN }} - upload_arn="$(.github/workflows/android-device-test/upload-external-data.sh)" - echo external_data_arn="$upload_arn" >> "$GITHUB_ENV" - - - uses: ./.github/actions/aws-device-farm-run - id: aws_device_farm_run - if: env.run_device_test == 'true' - with: - name: ${{ matrix.test.name }} - appType: ANDROID_APP - appFile: ${{ matrix.test.appFile }} - testFile: ${{ matrix.test.testFile }} - testPackageType: INSTRUMENTATION_TEST_PACKAGE - testType: INSTRUMENTATION - testFilter: ${{ matrix.test.testFilter }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_ROLE_TO_ASSUME: ${{ vars.OIDC_AWS_ROLE_TO_ASSUME }} - AWS_DEVICE_FARM_PROJECT_ARN: ${{ vars.AWS_DEVICE_FARM_PROJECT_ARN }} - AWS_DEVICE_FARM_DEVICE_POOL_ARN: ${{ matrix.test.devicePool }} - externalData: ${{ env.external_data_arn }} - testSpecArn: ${{ matrix.test.testSpecArn }} + export name="${{ matrix.test.name }}" + export appType=ANDROID_APP + export appFile="${{ matrix.test.appFile }}" + export testFile="${{ matrix.test.testFile }}" + export testPackageType=INSTRUMENTATION_TEST_PACKAGE + export testType=INSTRUMENTATION + export testFilter="${{ matrix.test.testFilter }}" + export AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" + export AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" + export AWS_ROLE_TO_ASSUME="${{ vars.OIDC_AWS_ROLE_TO_ASSUME }}" + export AWS_DEVICE_FARM_PROJECT_ARN="${{ vars.AWS_DEVICE_FARM_PROJECT_ARN }}" + export 
AWS_DEVICE_FARM_DEVICE_POOL_ARN="${{ matrix.test.devicePool }}" + export testSpecArn="${{ matrix.test.testSpecArn }}" + export wait_for_completion=true + + echo run_arn="$(./scripts/aws-device-farm/aws-device-farm-run.sh)" > "$GITHUB_ENV" - name: Store Test Artifacts if: (matrix.test.name == 'Android Benchmark' || failure()) && env.run_device_test == 'true' @@ -164,19 +160,15 @@ jobs: npm install results_dir="$(mktemp -d)" echo results_dir="$results_dir" >> "$GITHUB_ENV" - node scripts/aws-device-farm/store-test-artifacts.mjs --runArn ${{ steps.aws_device_farm_run.outputs.runArn }} --outputDir "$results_dir" - zip -r test_artifacts.zip ${{ env.results_dir }} + node scripts/aws-device-farm/store-test-artifacts.mjs --runArn ${{ env.run_arn }} --outputDir "$results_dir" + zip -r test_artifacts.zip "$results_dir" - name: Store Benchmark Results if: matrix.test.name == 'Android Benchmark' && env.run_device_test == 'true' run: | - for zipfile in ${{ env.results_dir }}/*.zip; do - unzip "$zipfile" -d "${zipfile%.zip}" - done - - find "${{ env.results_dir }}" -name 'benchmark_results.json' | while read -r benchmark_json; do - aws s3 cp "$benchmark_json" "s3://maplibre-native/android-benchmark-render/$(uuidgen).json" - done + benchmark_results_dir="$(mktemp -d)" + node scripts/aws-device-farm/collect-benchmark-outputs.mjs --inputDir "${{ env.results_dir }}" --outputDir "$benchmark_results_dir" + node scripts/aws-device-farm/upload-benchmark-outputs-to-s3.mjs --dir "$benchmark_results_dir" - name: Upload Test Artifacts if: (matrix.test.name == 'Android Benchmark' || failure()) && env.run_device_test == 'true' diff --git a/.github/workflows/android-device-test/upload-external-data.sh b/.github/workflows/android-device-test/upload-external-data.sh deleted file mode 100755 index 873f95e899e..00000000000 --- a/.github/workflows/android-device-test/upload-external-data.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -set -e - -# Check if the required variables are defined -if [ -z "$RESULTS_API" ] || [ -z "$AWS_DEVICE_FARM_PROJECT_ARN" ]; then - echo "Error: Missing required variables." 
- exit 1 -fi - -cd "$(mktemp -d)" - -cat << EOF > instrumentation-test-input.json -{ - "styleNames": ["Facebook Light", "MapTiler Basic", "Americana"], - "styleURLs": [ - "https://external.xx.fbcdn.net/maps/vt/style/canterbury_1_0/?locale=en_US", - "https://api.maptiler.com/maps/basic-v2/style.json?key=get_your_own_OpIi9ZULNHzrESv6T2vL", - "https://americanamap.org/style.json" - ], - "resultsAPI": "$RESULTS_API", - "apiKey": "get_your_own_OpIi9ZULNHzrESv6T2vL" -} -EOF - ->&2 zip instrumentation-test-input.zip instrumentation-test-input.json -upload_json="$(aws devicefarm create-upload --project-arn "$AWS_DEVICE_FARM_PROJECT_ARN" --type EXTERNAL_DATA --name instrumentation-test-input.zip --output json)" -upload_url="$(echo "$upload_json" | jq -r '.upload.url')" -upload_arn="$(echo "$upload_json" | jq -r '.upload.arn')" -curl -T instrumentation-test-input.zip "$upload_url" - -retries=5 -while true; do - upload_status="$(aws devicefarm get-upload --arn "$upload_arn" --output text --query upload.status)" - >&2 echo "Upload $upload_status" - sleep 1 - if [[ "$upload_status" == "SUCCEEDED" ]]; then - break - fi - - if (( --retries == 0 )); then - exit 1 - fi -done - -echo "$upload_arn" diff --git a/.github/workflows/ios-device-test.yml b/.github/workflows/ios-device-test.yml index a8236ff73fe..5ea5666f28e 100644 --- a/.github/workflows/ios-device-test.yml +++ b/.github/workflows/ios-device-test.yml @@ -49,20 +49,23 @@ jobs: with: files: "${{ matrix.test.xcTestFile }}, ${{ matrix.test.ipaFile }}" - - uses: ./.github/actions/aws-device-farm-run + - name: Run ${{ matrix.test.name }} on AWS Device Farm if: steps.check_files.outputs.files_exists == 'true' - with: - name: ${{ matrix.test.name }} - appType: IOS_APP - appFile: ${{ matrix.test.ipaFile }} - testFile: ${{ matrix.test.xcTestFile }} - testPackageType: XCTEST_TEST_PACKAGE - testType: XCTEST - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_ROLE_TO_ASSUME: ${{ vars.OIDC_AWS_ROLE_TO_ASSUME }} - AWS_DEVICE_FARM_PROJECT_ARN: ${{ vars.AWS_DEVICE_FARM_PROJECT_ARN }} - AWS_DEVICE_FARM_DEVICE_POOL_ARN: ${{ vars.AWS_DEVICE_FARM_IPHONE_DEVICE_POOL_ARN }} + run: | + export name="${{ matrix.test.name }}" + export appType=IOS_APP + export appFile="${{ matrix.test.ipaFile }}" + export testFile="${{ matrix.test.xcTestFile }}" + export testPackageType=XCTEST_TEST_PACKAGE + export testType=XCTEST + export AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" + export AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" + export AWS_ROLE_TO_ASSUME="${{ vars.OIDC_AWS_ROLE_TO_ASSUME }}" + export AWS_DEVICE_FARM_PROJECT_ARN="${{ vars.AWS_DEVICE_FARM_PROJECT_ARN }}" + export AWS_DEVICE_FARM_DEVICE_POOL_ARN="${{ vars.AWS_DEVICE_FARM_IPHONE_DEVICE_POOL_ARN }}" + export wait_for_completion=true + + ./scripts/aws-device-farm/aws-device-farm-run.sh - uses: LouisBrunner/checks-action@v2.0.0 if: always() diff --git a/.nvmrc b/.nvmrc index 805efa9f6fa..b3935607590 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -20.14.0 \ No newline at end of file +23 \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 75976489063..aad56eeb7b8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -23,7 +23,7 @@ "@octokit/rest": "^20.1.0", "@types/argparse": "^2.0.16", "@types/ejs": "^3.1.5", - "@types/node": "^20.16.1", + "@types/node": "^22.9.0", "argparse": "^2.0.1", "csscolorparser": "^1.0.3", "d3-queue": "3.0.7", @@ -2180,13 +2180,13 @@ "license": "MIT" }, 
"node_modules/@types/node": { - "version": "20.16.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.16.1.tgz", - "integrity": "sha512-zJDo7wEadFtSyNz5QITDfRcrhqDvQI1xQNQ0VoizPjM/dVAODqqIUWbJPkvsxmTI0MYRGRikcdjMPhOssnPejQ==", + "version": "22.9.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.9.0.tgz", + "integrity": "sha512-vuyHg81vvWA1Z1ELfvLko2c8f34gyA0zaic0+Rllc5lbCnbSyuvb2Oxpm6TAUAC/2xZN3QGqxBNggD1nNR2AfQ==", "dev": true, "license": "MIT", "dependencies": { - "undici-types": "~6.19.2" + "undici-types": "~6.19.8" } }, "node_modules/@xmldom/xmldom": { diff --git a/package.json b/package.json index b2644042477..f4c32fb1bc6 100644 --- a/package.json +++ b/package.json @@ -21,7 +21,7 @@ "@octokit/rest": "^20.1.0", "@types/argparse": "^2.0.16", "@types/ejs": "^3.1.5", - "@types/node": "^20.16.1", + "@types/node": "^22.9.0", "argparse": "^2.0.1", "csscolorparser": "^1.0.3", "d3-queue": "3.0.7", diff --git a/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/activity/benchmark/BenchmarkActivity.kt b/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/activity/benchmark/BenchmarkActivity.kt index f1215351049..d1455df678a 100644 --- a/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/activity/benchmark/BenchmarkActivity.kt +++ b/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/activity/benchmark/BenchmarkActivity.kt @@ -26,6 +26,7 @@ import org.maplibre.android.maps.MapLibreMap import org.maplibre.android.maps.MapLibreMap.CancelableCallback import org.maplibre.android.maps.MapView import org.maplibre.android.testapp.R +import org.maplibre.android.testapp.styles.TestStyles import org.maplibre.android.testapp.utils.BenchmarkInputData import org.maplibre.android.testapp.utils.BenchmarkResult import org.maplibre.android.testapp.utils.BenchmarkRun @@ -149,19 +150,19 @@ class BenchmarkActivity : AppCompatActivity() { return BenchmarkInputData( styleNames = listOf( "AWS Open Data Standard Light", - "Facebook Light", +// "Facebook Light", "Americana", - "Protomaps Light", - "Versatiles Colorful", - "OpenFreeMap Bright" +// "Protomaps Light", +// "Versatiles Colorful", + "OpenFreeMap Bright" ), styleURLs = listOf( "https://maps.geo.us-east-2.amazonaws.com/maps/v0/maps/OpenDataStyle/style-descriptor?key=v1.public.eyJqdGkiOiI1NjY5ZTU4My0yNWQwLTQ5MjctODhkMS03OGUxOTY4Y2RhMzgifR_7GLT66TNRXhZJ4KyJ-GK1TPYD9DaWuc5o6YyVmlikVwMaLvEs_iqkCIydspe_vjmgUVsIQstkGoInXV_nd5CcmqRMMa-_wb66SxDdbeRDvmmkpy2Ow_LX9GJDgL2bbiCws0wupJPFDwWCWFLwpK9ICmzGvNcrPbX5uczOQL0N8V9iUvziA52a1WWkZucIf6MUViFRf3XoFkyAT15Ll0NDypAzY63Bnj8_zS8bOaCvJaQqcXM9lrbTusy8Ftq8cEbbK5aMFapXRjug7qcrzUiQ5sr0g23qdMvnKJQFfo7JuQn8vwAksxrQm6A0ByceEXSfyaBoVpFcTzEclxUomhY.NjAyMWJkZWUtMGMyOS00NmRkLThjZTMtODEyOTkzZTUyMTBi", - "https://external.xx.fbcdn.net/maps/vt/style/canterbury_1_0/?locale=en_US", +// "https://external.xx.fbcdn.net/maps/vt/style/canterbury_1_0/?locale=en_US", "https://americanamap.org/style.json", - "https://api.protomaps.com/styles/v2/light.json?key=e761cc7daedf832a", - "https://tiles.versatiles.org/assets/styles/colorful.json", - "https://tiles.openfreemap.org/styles/bright" +// "https://api.protomaps.com/styles/v2/light.json?key=e761cc7daedf832a", +// "https://tiles.versatiles.org/assets/styles/colorful.json", + "https://tiles.openfreemap.org/styles/bright" ) ) } @@ -192,20 +193,26 @@ class BenchmarkActivity : AppCompatActivity() { mapView.getMapAsync { maplibreMap: MapLibreMap -> 
val benchmarkResult = BenchmarkResult(arrayListOf()) + val benchmarkSlowDuration = 70000 + val benchmarkFastDuration = 15000 + lifecycleScope.launch { val benchmarkRuns = inputData.styleNames.zip(inputData.styleURLs).flatMap { (styleName, styleUrl) -> listOf( - BenchmarkRun(styleName, styleUrl, true), - BenchmarkRun(styleName, styleUrl, false) + BenchmarkRun(styleName, styleUrl, true, benchmarkSlowDuration), + BenchmarkRun(styleName, styleUrl, false, benchmarkSlowDuration) ) }.toTypedArray() - val benchmarkIterations = 5 + val benchmarkIterations = 4 for (i in 0 until benchmarkIterations) { for (benchmarkRun in benchmarkRuns) { - val benchmarkRunResult = doBenchmarkRun(maplibreMap, benchmarkRun) + val benchmarkRunResult = doBenchmarkRun( + maplibreMap, + // do one fast run to cache needed tiles + if (i == 0) benchmarkRun.copy(duration = benchmarkFastDuration) else benchmarkRun) val benchmarkPair = Pair(benchmarkRun, benchmarkRunResult) - benchmarkResult.runs.add(benchmarkPair) - println(jsonPayload(BenchmarkResult(arrayListOf(benchmarkPair)))) + // don't store results for fast run + if (i != 0) benchmarkResult.runs.add(benchmarkPair) } } @@ -238,7 +245,7 @@ class BenchmarkActivity : AppCompatActivity() { for (place in PLACES) { maplibreMap.animateCameraSuspend( CameraUpdateFactory.newLatLngZoom(place, 14.0), - 10000 + benchmarkRun.duration ) } val endTime = System.nanoTime() @@ -305,10 +312,10 @@ class BenchmarkActivity : AppCompatActivity() { LatLng(38.9072, -77.0369), // DC LatLng(52.3702, 4.8952), // AMS LatLng(60.1699, 24.9384), // HEL - LatLng(-13.1639, -74.2236), // AYA - LatLng(52.5200, 13.4050), // BER - LatLng(12.9716, 77.5946), // BAN - LatLng(31.2304, 121.4737) // SHA +// LatLng(-13.1639, -74.2236), // AYA +// LatLng(52.5200, 13.4050), // BER +// LatLng(12.9716, 77.5946), // BAN +// LatLng(31.2304, 121.4737) // SHA ) } } diff --git a/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/utils/BenchmarkUtils.kt b/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/utils/BenchmarkUtils.kt index ed1d7da8c11..f49a1d014d1 100644 --- a/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/utils/BenchmarkUtils.kt +++ b/platform/android/MapLibreAndroidTestApp/src/main/java/org/maplibre/android/testapp/utils/BenchmarkUtils.kt @@ -25,6 +25,7 @@ data class BenchmarkRun( val styleName: String, val styleURL: String, val syncRendering: Boolean, + val duration: Int ) data class BenchmarkRunResult( @@ -58,6 +59,7 @@ fun jsonPayload(benchmarkResult: BenchmarkResult): JsonObject { put("renderer", JsonPrimitive(BuildConfig.FLAVOR)) put("debugBuild", JsonPrimitive(BuildConfig.DEBUG)) put("gitRevision", JsonPrimitive(GIT_REVISION)) + put("timestamp", JsonPrimitive(System.currentTimeMillis())) } } diff --git a/platform/android/scripts/run-benchmark.sh b/platform/android/scripts/run-benchmark.sh new file mode 100755 index 00000000000..118478a2633 --- /dev/null +++ b/platform/android/scripts/run-benchmark.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +./gradlew assembleLegacyRelease assembleLegacyReleaseAndroidTest -PtestBuildType=release + +export AWS_DEVICE_FARM_PROJECT_ARN="arn:aws:devicefarm:us-west-2:373521797162:project:20687d72-0e46-403e-8f03-0941850665bc" +export AWS_DEVICE_FARM_DEVICE_POOL_ARN="arn:aws:devicefarm:us-west-2:373521797162:devicepool:20687d72-0e46-403e-8f03-0941850665bc/b4d75cb6-f210-4927-b94e-17eae054fea7" +export appType=ANDROID_APP +export 
appFile="MapLibreAndroidTestApp/build/outputs/apk/legacy/release/MapLibreAndroidTestApp-legacy-release.apk" +export testFile="MapLibreAndroidTestApp/build/outputs/apk/androidTest/legacy/release/MapLibreAndroidTestApp-legacy-release-androidTest.apk" +export testType="INSTRUMENTATION" +export testPackageType="INSTRUMENTATION_TEST_PACKAGE" +export testSpecArn="arn:aws:devicefarm:us-west-2:373521797162:upload:20687d72-0e46-403e-8f03-0941850665bc/14862afb-cf88-44aa-9f1e-5131cbb22f01" +export testFilter="org.maplibre.android.benchmark.Benchmark" +export name="Android Benchmark" + +../../scripts/aws-device-farm/aws-device-farm-run.sh \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index dd1949a95c9..a2b82ab9428 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -44,8 +44,8 @@ devDependencies: specifier: ^3.1.5 version: 3.1.5 '@types/node': - specifier: ^20.16.1 - version: 20.16.1 + specifier: ^22.9.0 + version: 22.9.0 argparse: specifier: ^2.0.1 version: 2.0.1 @@ -1545,8 +1545,8 @@ packages: resolution: {integrity: sha512-nv+GSx77ZtXiJzwKdsASqi+YQ5Z7vwHsTP0JY2SiQgjGckkBRKZnk8nIM+7oUZ1VCtuTz0+By4qVR7fqzp/Dfg==} dev: true - /@types/node@20.16.1: - resolution: {integrity: sha512-zJDo7wEadFtSyNz5QITDfRcrhqDvQI1xQNQ0VoizPjM/dVAODqqIUWbJPkvsxmTI0MYRGRikcdjMPhOssnPejQ==} + /@types/node@22.9.0: + resolution: {integrity: sha512-vuyHg81vvWA1Z1ELfvLko2c8f34gyA0zaic0+Rllc5lbCnbSyuvb2Oxpm6TAUAC/2xZN3QGqxBNggD1nNR2AfQ==} dependencies: undici-types: 6.19.8 dev: true diff --git a/scripts/aws-device-farm/aws-device-farm-run.sh b/scripts/aws-device-farm/aws-device-farm-run.sh new file mode 100755 index 00000000000..31b58db2fd1 --- /dev/null +++ b/scripts/aws-device-farm/aws-device-farm-run.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +# List of required environment variables +required_vars=( + "AWS_DEVICE_FARM_PROJECT_ARN" + "AWS_DEVICE_FARM_DEVICE_POOL_ARN" + "appFile" + "appType" + "testFile" + "testType" + "testPackageType" + "name" +) + +check_var() { + var_name=$1 + if [[ -z "${!var_name}" ]]; then + echo "Error: Environment variable $var_name is not set." + exit 1 + fi +} + +for var in "${required_vars[@]}"; do + check_var "$var" +done + +if [[ ! -f "$appFile" ]]; then + echo "Error: App file $appFile does not exist." + exit 1 +fi + +if [[ ! -f "$testFile" ]]; then + echo "Error: Test file $testFile does not exist." 
+ exit 1 +fi + +# Create upload app +response=$(aws devicefarm create-upload --type "$appType" --name "$(basename "$appFile")" --project-arn "$AWS_DEVICE_FARM_PROJECT_ARN") +echo "$response" >&2 +app_arn="$(jq -r '.upload.arn' <<< "$response")" +app_url="$(jq -r '.upload.url' <<< "$response")" + +# Create upload test package +response=$(aws devicefarm create-upload --type "$testPackageType" --name "$(basename "$testFile")" --project-arn "$AWS_DEVICE_FARM_PROJECT_ARN") +echo "$response" >&2 +test_package_arn="$(jq -r '.upload.arn' <<< "$response")" +test_package_url="$(jq -r '.upload.url' <<< "$response")" + +# Upload app and test package +curl -T "$appFile" "$app_url" +curl -T "$testFile" "$test_package_url" + +max_checks=10 +sleep_time=5 + +check_status() { + aws devicefarm get-upload --arn "$1" | jq -r '.upload.status' +} + +while ((max_checks--)); do + status_app="$(check_status "$app_arn")" + status_test_package="$(check_status "$test_package_arn")" + + echo "status_app=$status_app" >&2 + echo "status_test_package=$status_test_package" >&2 + + if [[ "$status_app" == "SUCCEEDED" && "$status_test_package" == "SUCCEEDED" ]]; then + echo "Uploads succeeded" >&2 + break + elif ((max_checks == 0)); then + echo "App or test package failed to upload" >&2 + exit 1 + fi + + sleep $sleep_time +done + +# Schedule test run +arn="$(aws devicefarm schedule-run \ + --project-arn "$AWS_DEVICE_FARM_PROJECT_ARN" \ + --name "MapLibre Native $name" \ + --app-arn "$app_arn" \ + --device-pool-arn "$AWS_DEVICE_FARM_DEVICE_POOL_ARN" \ + --test type=$testType,testPackageArn=$test_package_arn${testFilter:+,filter=$testFilter}${testSpecArn:+,testSpecArn=$testSpecArn} \ + --execution-configuration videoCapture=false \ + --output text --query run.arn)" + +echo "$arn" + +if [[ "$wait_for_completion" != "true" ]]; then + echo "Not waiting for run to complete" >&2 + exit 0 +fi + +# wait until result is not PENDING +# https://awscli.amazonaws.com/v2/documentation/api/latest/reference/devicefarm/get-run.html#output +while true; do + sleep 30 + result="$(aws devicefarm get-run --arn "$arn" --output text --query "run.result")" + case $result in + FAILED|ERRORED|STOPPED) echo "Run $result" && exit 1 ;; + SKIPPED|PASSED) echo "Run $result" && exit 0 ;; + PENDING) continue ;; + *) echo "Unexpected run result $result" && exit 1 ;; + esac +done \ No newline at end of file diff --git a/scripts/aws-device-farm/collect-benchmark-outputs.mjs b/scripts/aws-device-farm/collect-benchmark-outputs.mjs new file mode 100644 index 00000000000..58c26f3eb43 --- /dev/null +++ b/scripts/aws-device-farm/collect-benchmark-outputs.mjs @@ -0,0 +1,87 @@ +import * as fs from "node:fs/promises"; +import { exec } from 'node:child_process'; +import { parseArgs, promisify } from "node:util"; +import * as path from "node:path"; + +const execPromise = promisify(exec); + +/** + * @returns {never} + */ +function usage() { + console.error("Collects benchmark outputs from AWS Device Farm artifacts"); + console.error("Usage: node collect-benchmark-outputs.mjs --inputDir INPUT_DIR --outputDir OUTPUT_DIR"); + process.exit(1); +} + +function getArgs() { + const { + values + } = parseArgs({ + options: { + outputDir: { + type: "string", + }, + inputDir: { + type: "string" + }, + }, + }); + const { outputDir, inputDir } = values; + if (typeof outputDir !== 'string') usage(); + + if (typeof inputDir !== 'string') usage(); + + return { + outputDir, + inputDir + } +} + +const { outputDir, inputDir } = getArgs(); + +await unzipFilesInDirectory(inputDir); +await 
writeResultsToOutputDir(); + +/////////////////////////////////////////////////////////////////////////////// + +/** + * + * @param {string} directory + */ +async function unzipFilesInDirectory(directory) { + try { + const files = await fs.readdir(directory); + const zipFiles = files.filter(file => file.endsWith('.zip')); + + for (const zipFile of zipFiles) { + const zipFilePath = path.join(directory, zipFile); + const { name } = path.parse(zipFile); + const extractDir = path.join(directory, name); + + await fs.mkdir(extractDir, { recursive: true }); + + await execPromise(`unzip "${zipFilePath}" -d "${extractDir}"`); + console.log(`Extracted ${zipFile} to ${extractDir}`); + } + + } catch (error) { + if (error instanceof Error) console.error(`Error unzipping files: ${error.message}`); + } +} + +async function writeResultsToOutputDir() { + for await (const benchmarkResultPath of fs.glob("**/benchmark_results.json", {cwd: inputDir})) { + try { + const fullBenchmarkResultPath = path.join(inputDir, benchmarkResultPath); + const benchmarkResultContents = await fs.readFile(fullBenchmarkResultPath, {encoding: 'utf-8'}); + const benchmarkResults = JSON.parse(benchmarkResultContents); + const { timestamp } = benchmarkResults; + if (typeof timestamp !== 'number') throw new Error("No timestamp found in benchmark result"); + await fs.copyFile(fullBenchmarkResultPath, path.join(outputDir, `${timestamp}.json`)); + } catch (err) { + console.error(`Error ${err}. Skipping: '${benchmarkResultPath}'`); + } + } + console.log(`Wrote benchmark results to ${outputDir}`); +} \ No newline at end of file diff --git a/scripts/aws-device-farm/device-farm-client.mjs b/scripts/aws-device-farm/device-farm-client.mjs index eb4a64f2699..77e1b2adf49 100644 --- a/scripts/aws-device-farm/device-farm-client.mjs +++ b/scripts/aws-device-farm/device-farm-client.mjs @@ -3,5 +3,5 @@ import { } from "@aws-sdk/client-device-farm"; export function getDeviceFarmClient() { - return new DeviceFarmClient({ region: "us-west-2" }); + return new DeviceFarmClient({ region: "us-west-2", retryMode: "adaptive", maxAttempts: 10 }); } diff --git a/scripts/aws-device-farm/plot-android-benchmark-results.py b/scripts/aws-device-farm/plot-android-benchmark-results.py new file mode 100644 index 00000000000..6207876f344 --- /dev/null +++ b/scripts/aws-device-farm/plot-android-benchmark-results.py @@ -0,0 +1,98 @@ +import sqlite3 +import matplotlib.pyplot as plt +import numpy as np +import math +import argparse + +parser = argparse.ArgumentParser("plot-android-benchmark-results") +parser.add_argument("db", help="Path to SQLite database with results", type=str) +args = parser.parse_args() + +# Connect to the SQLite database +conn = sqlite3.connect(args.db) +cursor = conn.cursor() + +# Query to get the average FPS and variance for each style, renderer, and model where syncRendering=0 +query = """ +SELECT deviceManufacturer, model, styleName, renderer, + AVG(fps) AS avg_fps, + ( + AVG(fps * fps) - AVG(fps) * AVG(fps) + ) AS variance_fps +FROM benchmark_result +WHERE syncRendering = 0 +GROUP BY deviceManufacturer, model, styleName, renderer +ORDER BY deviceManufacturer, model, styleName, renderer +""" + +# Execute the query +cursor.execute(query) +rows = cursor.fetchall() + +# Close the connection to the database +conn.close() + +# Organize the data into a dictionary for plotting +data = {} +for row in rows: + manufacturer, model, style, renderer, avg_fps, variance_fps = row + device = f"{manufacturer} {model}" + if 
device not in data: + data[device] = {} + if style not in data[device]: + data[device][style] = {'fps': [], 'stddev': []} + stddev_fps = math.sqrt(max(variance_fps, 0)) # Calculate the standard deviation and ensure it's not negative + data[device][style]['fps'].append((renderer, avg_fps)) + data[device][style]['stddev'].append((renderer, stddev_fps)) + +# Define the colors for each renderer +colors = {'legacy': 'gray', 'drawable': 'blue', 'vulkan': 'red'} +renderer_order = ['legacy', 'drawable', 'vulkan'] + +legend_names = {'legacy': 'Legacy', 'drawable': 'OpenGL', 'vulkan': 'Vulkan'} + +# Create subplots for each device +num_devices = len(data) +fig, axes = plt.subplots(num_devices, 1, figsize=(10, 6 * num_devices)) + +if num_devices == 1: + axes = [axes] + +# Plot data for each device +for ax, (device, styles) in zip(axes, data.items()): + # Get a sorted list of styles + style_names = sorted(styles.keys()) + + # Number of styles + n_styles = len(style_names) + + # Create an array with the position of each group of bars + bar_width = 0.25 + index = np.arange(n_styles) + + # Plot each renderer's bar with error bars in the defined order + for i, renderer in enumerate(renderer_order): + avg_fps_values = [] + stddev_values = [] + for style in style_names: + fps_values = dict(styles[style]['fps']) + stddev_values_dict = dict(styles[style]['stddev']) + # Use get to return a default of 0 if the renderer is not present + avg_fps_values.append(fps_values.get(renderer, 0)) + stddev_values.append(stddev_values_dict.get(renderer, 0)) + ax.bar(index + i * bar_width, avg_fps_values, bar_width, yerr=stddev_values, + label=legend_names[renderer], color=colors[renderer], capsize=5, error_kw={'elinewidth': 1, 'ecolor': 'black'}) + + # Set the title, labels, ticks, etc. 
+ ax.set_title(device) + ax.set_xlabel('Style') + ax.set_ylabel('Average FPS') + ax.set_xticks(index + bar_width) + ax.set_xticklabels(style_names) + ax.legend(title='Renderer') + +# Adjust layout +plt.tight_layout() + +# Show the plot +plt.savefig('plot.png') \ No newline at end of file diff --git a/scripts/aws-device-farm/store-test-artifacts.mjs b/scripts/aws-device-farm/store-test-artifacts.mjs index b69a16d0bad..2d781c4aa47 100644 --- a/scripts/aws-device-farm/store-test-artifacts.mjs +++ b/scripts/aws-device-farm/store-test-artifacts.mjs @@ -8,6 +8,18 @@ import { parseArgs } from "node:util"; import { ArtifactType, ListArtifactsCommand, ListJobsCommand, ListSuitesCommand } from "@aws-sdk/client-device-farm"; import { getDeviceFarmClient } from "./device-farm-client.mjs"; +/** + * @returns {never} + */ +function usage() { + console.error("Stores artifacts from AWS Device Farm run"); + console.error("Usage: node store-test-artifacts.mjs --outputDir OUTPUT_DIR --runArn RUN_ARN"); + console.error("Arguments:") + console.error("--customerArtifacts: only download customer artifacts"); + console.error("--testsSuite: only download stuff from Tests Suite"); + process.exit(1); +} + function getArgs() { const { values @@ -17,10 +29,11 @@ function getArgs() { type: "string", }, runArn: { - type: "string" + type: "string", + multiple: true }, testsSuite: { - type: "boolean" + type: "boolean" }, customerArtifacts: { type: "boolean" @@ -30,7 +43,7 @@ function getArgs() { const { outputDir, runArn } = values; if (typeof outputDir !== 'string') usage(); - if (typeof runArn !== 'string') usage(); + if (!runArn || !runArn.length) usage(); function suitesFilter() { const names = new Set(); @@ -57,56 +70,54 @@ function getArgs() { const { outputDir, runArn, suitesFilter, artifactsToDownload } = getArgs(); -/** - * @returns {never} - */ -function usage() { - console.error("Stores artifacts from AWS Device Farm run"); - console.error("Usage: node store-test-artifacts.mjs --outputDir OUTPUT_DIR --runArn RUN_ARN"); - console.error("Arguments:") - console.error("--customerArtifacts: only download customer artifacts"); - console.error("--testsSuite: only download stuff from Tests Suite"); - process.exit(1); -} - if (!fs.existsSync(outputDir)) { console.error("Output dir does not exist"); process.exit(1); } const deviceFarmClient = getDeviceFarmClient(); +await storeRunArtifacts(runArn, outputDir); + +/////////////////////////////////////////////////////////////////////////////// /** * Looks for the run with the provided ARN and returns the test spec output. 
* - * @param {string} arn + * @param {string[]} arnArr + * @param {string} outputDir * @returns string */ -async function getTestSpecOutput(arn) { - const jobs = await deviceFarmClient.send(new ListJobsCommand({ - arn - })); - - await Promise.all((jobs.jobs || []).map(async (job) => { - const suites = await deviceFarmClient.send(new ListSuitesCommand({arn: job.arn})); - await Promise.all((suites.suites || []).filter(suitesFilter).map(async (suite) => { - const artifacts = await deviceFarmClient.send(new ListArtifactsCommand({ - arn: suite.arn, - type: 'FILE' - })); - await Promise.all((artifacts.artifacts || []).map(async (artifact) => { - if (!artifact.name || !artifact.url || !artifact.type) return; - if (artifactsToDownload.includes(artifact.type)) { - const filename = `${artifact.name.replaceAll(' ', '_')}-${crypto.randomBytes(10).toString('hex')}.${artifact.extension}`; - const res = await fetch(artifact.url); - if (!res.ok || !res.body) return; - const destination = path.resolve(outputDir, filename); - const fileStream = fs.createWriteStream(destination, { flags: 'wx' }); - await finished(Readable.fromWeb(/** @type {any} **/ (res.body)).pipe(fileStream)); - } +async function storeRunArtifacts(arnArr, outputDir) { + for (const arn of arnArr) { + const jobs = await deviceFarmClient.send(new ListJobsCommand({ + arn + })); + + await Promise.all((jobs.jobs || []).map(async (job) => { + const suites = await deviceFarmClient.send(new ListSuitesCommand({ arn: job.arn })); + await Promise.all((suites.suites || []).filter(suitesFilter).map(async (suite) => { + const artifacts = await deviceFarmClient.send(new ListArtifactsCommand({ + arn: suite.arn, + type: 'FILE' + })); + await Promise.all((artifacts.artifacts || []).map(async (artifact) => { + if (!artifact.name || !artifact.url || !artifact.type) return; + if (artifactsToDownload.includes(artifact.type)) { + if (!artifact.arn) return; + const destination = path.join(outputDir, `${Buffer.from(artifact.arn).toString('base64')}.${artifact.extension}`); + try { + await fs.promises.access(destination); + return; // already exists + } catch (err) { + } + const res = await fetch(artifact.url); + if (!res.ok || !res.body) return; + const fileStream = fs.createWriteStream(destination, { flags: 'wx' }); + await finished(Readable.fromWeb(/** @type {any} **/(res.body)).pipe(fileStream)); + } + })); })); })); - })); + } + console.log(`Wrote run artifacts to ${outputDir}`) } - -await getTestSpecOutput(runArn); diff --git a/scripts/aws-device-farm/update-benchmark-db.mjs b/scripts/aws-device-farm/update-benchmark-db.mjs new file mode 100644 index 00000000000..fc79ff65d7f --- /dev/null +++ b/scripts/aws-device-farm/update-benchmark-db.mjs @@ -0,0 +1,195 @@ +import { parseArgs, promisify } from "node:util"; +import * as path from "node:path"; +import os, { type } from "node:os"; +import * as fs from "node:fs/promises"; +import { exec as execCallback } from "node:child_process"; +import { DatabaseSync } from "node:sqlite"; + +const exec = promisify(execCallback); + +/** + * @returns {never} + */ +function usage() { + console.error("Update database with benchmark results"); + console.error("Usage: node update-benchmark-db.mjs --dbPath PATH [--gitRevision SHA] [--jsonDir JSON_DIR] [--download]"); + process.exit(1); +} + +function getArgs() { + const { + values + } = parseArgs({ + options: { + dbPath: { + type: "string" + }, + gitRevision: { + type: "string", + }, + jsonDir: { + type: "string" + }, + download: { + type: "boolean" + } + }, + }); + const { 
gitRevision, jsonDir, download, dbPath } = values; + + if (typeof dbPath !== 'string') { + console.error("--dbPath is required"); + throw usage(); + } + + return { + gitRevision, + jsonDir, + download, + dbPath + } +} + +const { gitRevision, jsonDir, download, dbPath } = getArgs(); + + +async function getDir() { + if (jsonDir) { + return jsonDir; + } + if (!download) { + console.error("--jsonDir is required when not using --download"); + throw usage(); + } + return await fs.mkdtemp(path.join(os.tmpdir(), `benchmark-results-${gitRevision}-`)); +} + +let dir = await getDir(); + +if (download) await downloadBenchmarkResults(gitRevision); + +const results = await loadResults(dir); + +updateDb(dbPath, results); + +/////////////////////////////////////////////////////////////////////////////// + +/** + * @param {string | undefined} gitRevision + * @returns {Promise} + */ +async function downloadBenchmarkResults(gitRevision) { + if (!gitRevision) { + console.error("--gitRevision is required when downloading"); + throw usage(); + } + + const bucket = "maplibre-native"; + const prefix = `android-benchmark-render/${gitRevision}`; + + try { + // Run the AWS CLI sync command + const { stdout, stderr } = await exec(`aws s3 sync s3://${bucket}/${prefix} ${dir}`); + console.log(stdout); + if (stderr) { + console.error(`Warning: ${stderr}`); + } + } catch (error) { + throw new Error(`Failed to sync directory s3://${bucket}/${prefix}. ${error}`); + } + + console.log(`Downloaded benchmark results to ${dir}`); + return dir; +} + +/** + * @param {string} dir + * @returns {Promise} An array with parsed JSON objects from each file + */ +async function loadResults(dir) { + return (await Promise.all((await fs.readdir(dir)) + .map(filePath => path.join(dir, filePath)) + .filter(filePath => path.extname(filePath) === ".json") + .map(filePath => fs.readFile(filePath, "utf-8")))) + .map(fileContents => JSON.parse(fileContents)); +} + +/** + * @param {string} dbPath + * @param {any[]} results + */ +function updateDb(dbPath, results) { + const db = new DatabaseSync(dbPath); + db.exec(` + CREATE TABLE IF NOT EXISTS benchmark_result ( + id TEXT PRIMARY KEY, + styleName TEXT NOT NULL, + fps REAL NOT NULL, + avgEncodingTime REAL NOT NULL, + low1pEncodingTime REAL NOT NULL, + avgRenderingTime REAL NOT NULL, + low1pRenderingTime REAL NOT NULL, + syncRendering BOOLEAN NOT NULL, + deviceManufacturer TEXT NOT NULL, + model TEXT NOT NULL, + renderer TEXT NOT NULL, + gitRevision TEXT NOT NULL + )`); + + const deleteExisting = db.prepare( + `DELETE FROM benchmark_result WHERE id = ?`); + + const stmt = db.prepare( + `INSERT INTO benchmark_result ( + id, + styleName, + fps, + avgEncodingTime, + low1pEncodingTime, + avgRenderingTime, + low1pRenderingTime, + syncRendering, + deviceManufacturer, + model, + renderer, + gitRevision + ) VALUES ( + @id, + @styleName, + @fps, + @avgEncodingTime, + @low1pEncodingTime, + @avgRenderingTime, + @low1pRenderingTime, + @syncRendering, + @deviceManufacturer, + @model, + @renderer, + @gitRevision + ) RETURNING *`); + + for (const run of results) { + for (const [index, result] of run.results.entries()) { + const id = `${run.timestamp}-${index}`; + const { + deviceManufacturer, + gitRevision, + model, + renderer, + } = run; + deleteExisting.run(id); + const params = { + ...result, + id, + deviceManufacturer, + gitRevision, + model, + renderer, + syncRendering: result.syncRendering ? 
1 : 0, + }; + stmt.run(params); + } + } + + return db; +} diff --git a/scripts/aws-device-farm/upload-benchmark-outputs-to-s3.mjs b/scripts/aws-device-farm/upload-benchmark-outputs-to-s3.mjs new file mode 100644 index 00000000000..88221dc14e8 --- /dev/null +++ b/scripts/aws-device-farm/upload-benchmark-outputs-to-s3.mjs @@ -0,0 +1,87 @@ +import { parseArgs } from "node:util"; +import * as fs from "node:fs/promises"; +import * as path from "node:path"; +import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3"; + +/** + * @returns {never} + */ +function usage() { + console.error("Uploads a directory of benchmark results (JSON files) to S3"); + console.error("Usage: node upload-benchmark-outputs-to-s3.mjs --dir DIR"); + process.exit(1); +} + +function getArgs() { + const { + values + } = parseArgs({ + options: { + dir: { + type: "string", + }, + }, + }); + const { dir } = values; + if (typeof dir !== 'string') usage(); + + return { + dir + } +} + +const { dir } = getArgs(); + +processFiles(dir); + +/////////////////////////////////////////////////////////////////////////////// + +/** + * + * @param {string} filePath + * @param {string} gitRevision + * @param {string} filename + */ +async function uploadFileToS3(filePath, gitRevision, filename) { + const s3Client = new S3Client({ region: "eu-central-1" }); + const bucket = "maplibre-native"; + const key = `android-benchmark-render/${gitRevision}/${filename}`; + + try { + const fileContent = await fs.readFile(filePath); + const command = new PutObjectCommand({ + Bucket: bucket, + Key: key, + Body: fileContent, + }); + await s3Client.send(command); + console.log(`Uploaded ${filename} to s3://${bucket}/${key}`); + } catch (error) { + console.error(`Failed to upload ${filename}:`, error); + } +} + +/** + * + * @param {string} dir + */ +async function processFiles(dir) { + const files = await fs.readdir(dir); + await Promise.all(files.map(async (filename) => { + const filePath = path.join(dir, filename); + if (path.extname(filename) !== ".json") return; + + try { + const fileContent = await fs.readFile(filePath, "utf-8"); + const data = JSON.parse(fileContent); + if (!data.gitRevision) { + console.error(`File ${filename} does not have a 'gitRevision' key. Skipping.`); + return; + } + + await uploadFileToS3(filePath, data.gitRevision, filename); + } catch (error) { + console.error(`Error processing file ${filename}:`, error); + } + })); +}