From 242b8cc21cfd6cb1b2bd3ec15f24dc3888352c63 Mon Sep 17 00:00:00 2001 From: Benjamin Wilson Date: Tue, 28 Mar 2023 16:40:36 -0700 Subject: [PATCH] Revert "documentation/readmes (#131)" This reverts commit fcae30c2cbae615621e97c514745d35c5d22a186. --- .github/workflows/mdbook.yml | 58 --- .gitignore | 2 +- DOWNLOAD.md | 38 ++ README.md | 130 ++++++- conda/INSTALL.md | 33 ++ guide/book.toml | 25 -- guide/mdbook-admonish.css | 352 ------------------ guide/src/SUMMARY.md | 22 -- guide/src/api/README.md | 0 guide/src/contributing.md | 4 - guide/src/datasets/README.md | 13 - guide/src/getting_started.md | 121 ------ guide/src/getting_started/README.md | 1 - guide/src/introduction.md | 18 - guide/src/tasks/3d_object_detection.md | 207 ---------- guide/src/tasks/3d_scene_flow.md | 77 ---- guide/src/tasks/README.md | 0 guide/src/tasks/motion_forecasting.md | 74 ---- guide/src/testing.md | 9 - guide/src/tutorials/3d_object_detection.md | 3 - guide/src/tutorials/README.md | 0 guide/src/tutorials/detection_data_loader.md | 1 - noxfile.py | 88 +++-- .../av2/datasets/lidar/README.md | 15 +- .../av2/datasets/motion_forecasting/README.md | 16 +- .../av2/datasets/sensor/README.md | 15 +- .../av2/datasets/tbv/README.md | 17 +- .../evaluation/detection/SUBMISSION_FORMAT.md | 46 +++ .../scene_flow/SUBMISSION_FORMAT.md | 42 +++ .../api/hd_maps.md => src/av2/map/README.md | 44 +-- ..._detection.py => detection_data_loader.py} | 0 31 files changed, 379 insertions(+), 1092 deletions(-) delete mode 100644 .github/workflows/mdbook.yml create mode 100644 DOWNLOAD.md create mode 100644 conda/INSTALL.md delete mode 100644 guide/book.toml delete mode 100644 guide/mdbook-admonish.css delete mode 100644 guide/src/SUMMARY.md delete mode 100644 guide/src/api/README.md delete mode 100644 guide/src/contributing.md delete mode 100644 guide/src/datasets/README.md delete mode 100644 guide/src/getting_started.md delete mode 100644 guide/src/getting_started/README.md delete mode 100644 guide/src/introduction.md delete mode 100644 guide/src/tasks/3d_object_detection.md delete mode 100644 guide/src/tasks/3d_scene_flow.md delete mode 100644 guide/src/tasks/README.md delete mode 100644 guide/src/tasks/motion_forecasting.md delete mode 100644 guide/src/testing.md delete mode 100644 guide/src/tutorials/3d_object_detection.md delete mode 100644 guide/src/tutorials/README.md delete mode 100644 guide/src/tutorials/detection_data_loader.md rename guide/src/datasets/lidar.md => src/av2/datasets/lidar/README.md (92%) rename guide/src/datasets/motion_forecasting.md => src/av2/datasets/motion_forecasting/README.md (84%) rename guide/src/datasets/sensor.md => src/av2/datasets/sensor/README.md (96%) rename guide/src/datasets/map_change_detection.md => src/av2/datasets/tbv/README.md (97%) create mode 100644 src/av2/evaluation/detection/SUBMISSION_FORMAT.md create mode 100644 src/av2/evaluation/scene_flow/SUBMISSION_FORMAT.md rename guide/src/api/hd_maps.md => src/av2/map/README.md (75%) rename tutorials/{3d_object_detection.py => detection_data_loader.py} (100%) diff --git a/.github/workflows/mdbook.yml b/.github/workflows/mdbook.yml deleted file mode 100644 index e702c2f1..00000000 --- a/.github/workflows/mdbook.yml +++ /dev/null @@ -1,58 +0,0 @@ -# Sample workflow for building and deploying a mdBook site to GitHub Pages -# -# To get started with mdBook see: https://rust-lang.github.io/mdBook/index.html -# -name: Deploy mdBook site to Pages - -on: - # Runs on pushes targeting the default branch - push: - branches: ["documentation/readmes"] - - 
# Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages -permissions: - contents: read - pages: write - id-token: write - -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. -concurrency: - group: "pages" - cancel-in-progress: false - -jobs: - # Build job - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Install mdBook and mdbook-katex. - run: | - curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSf -y | sh - rustup update - cargo install mdbook mdbook-katex mdbook-admonish mdbook-toc - - name: Setup Pages - id: pages - uses: actions/configure-pages@v3 - - name: Build with mdBook - run: mdbook build guide - - name: Upload artifact - uses: actions/upload-pages-artifact@v1 - with: - path: ./guide/book - - # Deployment job - deploy: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - needs: build - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v1 diff --git a/.gitignore b/.gitignore index 41f6700b..7e645398 100644 --- a/.gitignore +++ b/.gitignore @@ -156,4 +156,4 @@ experiments *.pt *.mp4 -guide/book +guide/book \ No newline at end of file diff --git a/DOWNLOAD.md b/DOWNLOAD.md new file mode 100644 index 00000000..fedf4ddc --- /dev/null +++ b/DOWNLOAD.md @@ -0,0 +1,38 @@ +# Downloading the Argoverse 2 Datasets +Our datasets are available for download from [AWS S3](https://aws.amazon.com/s3/). For the best experience, we highly recommend using the open-source [s5cmd](https://github.com/peak/s5cmd) tool to transfer the data to your local filesystem (additional info available [here](https://aws.amazon.com/blogs/opensource/parallelizing-s3-workloads-s5cmd/)). Please note that an AWS account is not required to download the datasets. + +### Installing `s5cmd` + +`s5cmd` can be easily installed with the following script: + +```bash +#!/usr/bin/env bash + +export INSTALL_DIR=$HOME/.local/bin +export PATH=$PATH:$INSTALL_DIR +export S5CMD_URI=https://github.com/peak/s5cmd/releases/download/v1.4.0/s5cmd_1.4.0_$(uname | sed 's/Darwin/macOS/g')-64bit.tar.gz + +mkdir -p $INSTALL_DIR +curl -sL $S5CMD_URI | tar -C $INSTALL_DIR -xvzf - s5cmd +``` + +Note that it will install `s5cmd` in your local bin directory. You can always change the path if you prefer installing it in another directory. + +### Downloading Datasets +Once `s5cmd` is installed installed, downloading a dataset is as easy as running the following (using the sensor dataset as an example): + +```bash +s5cmd --no-sign-request cp "s3://argoai-argoverse/av2/sensor/*" target-directory +``` + +The command will download all S3 objects to the target directory (for example, `target-directory` can be `/home/av2/sensors/`). Given the size of the dataset, it might take a couple of hours depending on the network connectivity. + +When the download is finished, the dataset is ready to use! 
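+
+If you only need a portion of a dataset (for example, a single log), `s5cmd` wildcards make that straightforward. The sketch below is illustrative only: it assumes the usual `train`/`val`/`test` prefix layout under each dataset and uses placeholder values for the split, log id, and target directory.
+
+```bash
+export DATASET_SPLIT="val"             # train, val, or test (assumed S3 layout).
+export LOG_ID="..."                    # Substitute a specific log id here.
+export TARGET_DIR="$HOME/av2/sensor"   # Any local directory.
+
+mkdir -p $TARGET_DIR
+s5cmd --no-sign-request cp "s3://argoai-argoverse/av2/sensor/$DATASET_SPLIT/$LOG_ID/*" $TARGET_DIR/
+```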
+ +### Dataset S3 Locations +```bash +s3://argoai-argoverse/av2/sensor/ +s3://argoai-argoverse/av2/lidar/ +s3://argoai-argoverse/av2/motion-forecasting/ +s3://argoai-argoverse/av2/tbv/ +``` diff --git a/README.md b/README.md index 6d2a3394..e4c0cfc1 100644 --- a/README.md +++ b/README.md @@ -2,34 +2,123 @@ ![CI Status](https://github.com/argoai/av2-api/actions/workflows/ci.yml/badge.svg) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](./LICENSE) -# Argoverse 2 +# Argoverse 2 API > _Official_ GitHub repository for the [Argoverse 2](https://www.argoverse.org) family of datasets. -

- -

+If you have any questions or run into any problems with either the data or API, please feel free to open a [GitHub issue](https://github.com/argoai/av2-api/issues)! + +## Announcements + +### Argoverse competitions are live! + - Argoverse 2 + - 3D Object Detection + - Challenge Link: https://eval.ai/challenge/1710/overview + - Baseline: https://github.com/benjaminrwilson/torchbox3d + - Motion Forecasting + - Challenge Link: https://eval.ai/challenge/1719/overview + - Argoverse 1 + - Stereo + - Challenge Link: https://eval.ai/challenge/1704/overview + +## TL;DR + +- Install the API: `bash conda/install.sh` +- Read the [instructions](DOWNLOAD.md) to download the data. + +## Overview + +- [Setup](#setup) +- [Datasets](#datasets) +- [Testing](#testing) +- [Contributing](#contributing) +- [Citing](#citing) +- [License](#license) ## Getting Started -Please see the [Argoverse User Guide](https://argoverse.github.io/av2-api/). +### Setup + +The easiest way to install the API is via [conda](https://docs.conda.io/en/latest/) by running the following command: + +```bash +bash conda/install.sh +``` + +Additional information can be found in the: [INSTALL README](conda/INSTALL.md). + +### Datasets + +The _Argoverse 2_ family consists of **four** distinct datasets: + +| Dataset Name | Scenarios | Camera Imagery | Lidar| Maps | Additional Information| +| ---------------| --------: | :------------: | :--: | :--: | :--------------------:| +| Sensor | 1,000 | :white_check_mark: | :white_check_mark: | :white_check_mark: | [Sensor Dataset README](src/av2/datasets/sensor/README.md) | +| Lidar | 20,000 | | :white_check_mark: | :white_check_mark: | [Lidar Dataset README](src/av2/datasets/lidar/README.md) | +| Motion Forecasting | 250,000 | | | :white_check_mark: | [Motion Forecasting Dataset README](src/av2/datasets/motion_forecasting/README.md) | +| Map Change (Trust, but Verify) | 1,045 | :white_check_mark: | :white_check_mark: | :white_check_mark: | [Map Change Dataset README](src/av2/datasets/tbv/README.md) | -## Supported Datasets +Please see [DOWNLOAD.md](DOWNLOAD.md) for detailed instructions on how to download each dataset. -- Argoverse 2 (AV2) - - [Sensor](https://argoverse.github.io/av2-api/datasets/sensor.html) - - [Lidar](https://argoverse.github.io/av2-api/datasets/lidar.html) - - [Motion Forecasting](https://argoverse.github.io/av2-api/datasets/motion_forecasting.html) -- Trust, but Verify (TbV) - - [Map Change Detection](https://argoverse.github.io/av2-api/datasets/map_change_detection.html) +
+

Sensor Dataset

+ + + + +
-## Supported Tasks +
+

Lidar Dataset

+ + + + +
-- Argoverse 2 (AV2) - - [3D Object Detection](https://argoverse.github.io/av2-api/tasks/3d_object_detection.html) - - [3D Scene Flow](https://argoverse.github.io/av2-api/tasks/3d_scene_flow.html) - - [Motion Forecasting](https://argoverse.github.io/av2-api/tasks/motion_forecasting.html) +
+

Motion Forecasting Dataset

+ + + +
+ +
+

Map Change Dataset (Trust, but Verify)

+ + + + +
+ +### Map API + +Please refer to the [map README](src/av2/map/README.md) for additional details about the common format for vector and +raster maps that we employ across all AV2 datasets. + +## Compatibility Matrix + +| `Python Version` | `linux` | `macOS` | `windows` | +| ------------- | :----------------: | :----------------: | :----------------: | +| `3.8` | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| `3.9` | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| `3.10` | :white_check_mark: | :white_check_mark: | :white_check_mark: | + +## Testing + +All incoming pull requests are tested using [nox](https://nox.thea.codes/en/stable/) as +part of the CI process. This ensures that the latest version of the API is always stable on all supported platforms. You +can run the full suite of automated checks and tests locally using the following command: + +```bash +nox -r +``` + +## Contributing + +Have a cool feature you'd like to add? Found an unhandled corner case? The Argoverse team welcomes contributions from +the open source community - please open a PR using the following [template](.github/pull_request_template.md)! ## Citing @@ -44,7 +133,7 @@ Please use the following citation when referencing the [Argoverse 2](https://dat } ``` -Use the following citation when referencing the [Trust, but Verify](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/file/6f4922f45568161a8cdf4ad2299f6d23-Paper-round2.pdf) _Map Change Detection_ Dataset: +Use the following citation when referencing the [Argoverse 2](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/file/6f4922f45568161a8cdf4ad2299f6d23-Paper-round2.pdf) _Map Change_ Dataset: ```BibTeX @INPROCEEDINGS { TrustButVerify, author = {John Lambert and James Hays}, @@ -53,3 +142,8 @@ Use the following citation when referencing the [Trust, but Verify](https://data year = {2021} } ``` + +## License + +All code provided within this repository is released under the **MIT license** and bound by the _Argoverse_ **terms of use**, +please see [LICENSE](LICENSE) and [NOTICE](NOTICE) for additional details. diff --git a/conda/INSTALL.md b/conda/INSTALL.md new file mode 100644 index 00000000..6723d23d --- /dev/null +++ b/conda/INSTALL.md @@ -0,0 +1,33 @@ +# Installation + +We _highly_ recommend using `conda` with the `conda-forge` channel for package management. + +## Install `conda` + +You will need to install `conda` on your machine. We recommend to install the `conda-forge` version of `conda` found at https://github.com/conda-forge/miniforge#install. + +## Install `av2` + +Simply run: + +```bash +bash install.sh +``` + +which will install _all_ of the necessary dependencies in a conda environment named `av2`. + +To activate your environment (i.e., update your system paths), run: + +```bash +conda activate av2 +``` + +## FAQ + +> Why manage dependencies in `conda` instead of `pip`? + +`conda` enables package management outside of the `python` ecosystem. This enables us to specify all necessary dependencies in `environment.yml`. Further, gpu-based packages (e.g., `torch`) are handled better through `conda`. + +> Why `conda-forge`? + +`conda-forge` is a community-driven channel of conda recipes. It includes a large number of packages which can all be properly tracked in the `conda` resolver allowing for consistent environments without conflicts. 
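+
+Once the environment is active, a quick sanity check (a minimal sketch, assuming the install completed successfully) is to import the package from the command line:
+
+```bash
+conda activate av2
+python -c "import av2; print(av2.__file__)"  # Prints the path of the installed package.
+```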
\ No newline at end of file diff --git a/guide/book.toml b/guide/book.toml deleted file mode 100644 index 637b0907..00000000 --- a/guide/book.toml +++ /dev/null @@ -1,25 +0,0 @@ -[book] -authors = ["Argoverse Team"] -language = "en" -multilingual = false -src = "src" -title = "Argoverse User Guide" - -[output.html] -curly-quotes = true -mathjax-support = true -additional-css = ["././mdbook-admonish.css"] - -[output.html.fold] -# enable = true -# level = 1 - -[preprocessor.katex] - -[preprocessor.admonish] -command = "mdbook-admonish" -assets_version = "2.0.0" # do not edit: managed by `mdbook-admonish install` - -[preprocessor.toc] -command = "mdbook-toc" -renderer = ["html"] diff --git a/guide/mdbook-admonish.css b/guide/mdbook-admonish.css deleted file mode 100644 index 5e360387..00000000 --- a/guide/mdbook-admonish.css +++ /dev/null @@ -1,352 +0,0 @@ -@charset "UTF-8"; -:root { - --md-admonition-icon--note: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--abstract: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--info: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--tip: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--success: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--question: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--warning: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--failure: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--danger: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--bug: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--example: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--quote: - url("data:image/svg+xml;charset=utf-8,"); - --md-details-icon: - url("data:image/svg+xml;charset=utf-8,"); -} - -:is(.admonition) { - display: flow-root; - margin: 1.5625em 0; - padding: 0 1.2rem; - color: var(--fg); - page-break-inside: avoid; - background-color: var(--bg); - border: 0 solid black; - border-inline-start-width: 0.4rem; - border-radius: 0.2rem; - box-shadow: 0 0.2rem 1rem rgba(0, 0, 0, 0.05), 0 0 0.1rem rgba(0, 0, 0, 0.1); -} -@media print { - :is(.admonition) { - box-shadow: none; - } -} -:is(.admonition) > * { - box-sizing: border-box; -} -:is(.admonition) :is(.admonition) { - margin-top: 1em; - margin-bottom: 1em; -} -:is(.admonition) > .tabbed-set:only-child { - margin-top: 0; -} -html :is(.admonition) > :last-child { - margin-bottom: 1.2rem; -} - -a.admonition-anchor-link { - display: none; - position: absolute; - left: -1.2rem; - padding-right: 1rem; -} -a.admonition-anchor-link:link, a.admonition-anchor-link:visited { - color: var(--fg); -} -a.admonition-anchor-link:link:hover, a.admonition-anchor-link:visited:hover { - text-decoration: none; -} -a.admonition-anchor-link::before { - content: "§"; -} - -:is(.admonition-title, summary) { - position: relative; - margin-block: 0; - margin-inline: -1.6rem -1.2rem; - padding-block: 0.8rem; - padding-inline: 4.4rem 1.2rem; - font-weight: 700; - background-color: rgba(68, 138, 255, 0.1); - display: flex; -} -:is(.admonition-title, summary) p { - margin: 0; -} -html :is(.admonition-title, summary):last-child { - margin-bottom: 0; -} -:is(.admonition-title, summary)::before { - position: absolute; - top: 0.625em; - inset-inline-start: 1.6rem; - width: 2rem; - height: 2rem; - background-color: #448aff; - mask-image: url('data:image/svg+xml;charset=utf-8,'); - -webkit-mask-image: 
url('data:image/svg+xml;charset=utf-8,'); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-size: contain; - content: ""; -} -:is(.admonition-title, summary):hover a.admonition-anchor-link { - display: initial; -} - -details.admonition > summary.admonition-title::after { - position: absolute; - top: 0.625em; - inset-inline-end: 1.6rem; - height: 2rem; - width: 2rem; - background-color: currentcolor; - mask-image: var(--md-details-icon); - -webkit-mask-image: var(--md-details-icon); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-size: contain; - content: ""; - transform: rotate(0deg); - transition: transform 0.25s; -} -details[open].admonition > summary.admonition-title::after { - transform: rotate(90deg); -} - -:is(.admonition):is(.note) { - border-color: #448aff; -} - -:is(.note) > :is(.admonition-title, summary) { - background-color: rgba(68, 138, 255, 0.1); -} -:is(.note) > :is(.admonition-title, summary)::before { - background-color: #448aff; - mask-image: var(--md-admonition-icon--note); - -webkit-mask-image: var(--md-admonition-icon--note); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.abstract, .summary, .tldr) { - border-color: #00b0ff; -} - -:is(.abstract, .summary, .tldr) > :is(.admonition-title, summary) { - background-color: rgba(0, 176, 255, 0.1); -} -:is(.abstract, .summary, .tldr) > :is(.admonition-title, summary)::before { - background-color: #00b0ff; - mask-image: var(--md-admonition-icon--abstract); - -webkit-mask-image: var(--md-admonition-icon--abstract); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.info, .todo) { - border-color: #00b8d4; -} - -:is(.info, .todo) > :is(.admonition-title, summary) { - background-color: rgba(0, 184, 212, 0.1); -} -:is(.info, .todo) > :is(.admonition-title, summary)::before { - background-color: #00b8d4; - mask-image: var(--md-admonition-icon--info); - -webkit-mask-image: var(--md-admonition-icon--info); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.tip, .hint, .important) { - border-color: #00bfa5; -} - -:is(.tip, .hint, .important) > :is(.admonition-title, summary) { - background-color: rgba(0, 191, 165, 0.1); -} -:is(.tip, .hint, .important) > :is(.admonition-title, summary)::before { - background-color: #00bfa5; - mask-image: var(--md-admonition-icon--tip); - -webkit-mask-image: var(--md-admonition-icon--tip); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.success, .check, .done) { - border-color: #00c853; -} - -:is(.success, .check, .done) > :is(.admonition-title, summary) { - background-color: rgba(0, 200, 83, 0.1); -} -:is(.success, .check, .done) > :is(.admonition-title, summary)::before { - background-color: #00c853; - mask-image: var(--md-admonition-icon--success); - -webkit-mask-image: var(--md-admonition-icon--success); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.question, .help, .faq) { - border-color: #64dd17; -} - -:is(.question, .help, .faq) > :is(.admonition-title, summary) { - background-color: rgba(100, 221, 23, 0.1); -} -:is(.question, .help, .faq) 
> :is(.admonition-title, summary)::before { - background-color: #64dd17; - mask-image: var(--md-admonition-icon--question); - -webkit-mask-image: var(--md-admonition-icon--question); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.warning, .caution, .attention) { - border-color: #ff9100; -} - -:is(.warning, .caution, .attention) > :is(.admonition-title, summary) { - background-color: rgba(255, 145, 0, 0.1); -} -:is(.warning, .caution, .attention) > :is(.admonition-title, summary)::before { - background-color: #ff9100; - mask-image: var(--md-admonition-icon--warning); - -webkit-mask-image: var(--md-admonition-icon--warning); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.failure, .fail, .missing) { - border-color: #ff5252; -} - -:is(.failure, .fail, .missing) > :is(.admonition-title, summary) { - background-color: rgba(255, 82, 82, 0.1); -} -:is(.failure, .fail, .missing) > :is(.admonition-title, summary)::before { - background-color: #ff5252; - mask-image: var(--md-admonition-icon--failure); - -webkit-mask-image: var(--md-admonition-icon--failure); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.danger, .error) { - border-color: #ff1744; -} - -:is(.danger, .error) > :is(.admonition-title, summary) { - background-color: rgba(255, 23, 68, 0.1); -} -:is(.danger, .error) > :is(.admonition-title, summary)::before { - background-color: #ff1744; - mask-image: var(--md-admonition-icon--danger); - -webkit-mask-image: var(--md-admonition-icon--danger); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.bug) { - border-color: #f50057; -} - -:is(.bug) > :is(.admonition-title, summary) { - background-color: rgba(245, 0, 87, 0.1); -} -:is(.bug) > :is(.admonition-title, summary)::before { - background-color: #f50057; - mask-image: var(--md-admonition-icon--bug); - -webkit-mask-image: var(--md-admonition-icon--bug); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.example) { - border-color: #7c4dff; -} - -:is(.example) > :is(.admonition-title, summary) { - background-color: rgba(124, 77, 255, 0.1); -} -:is(.example) > :is(.admonition-title, summary)::before { - background-color: #7c4dff; - mask-image: var(--md-admonition-icon--example); - -webkit-mask-image: var(--md-admonition-icon--example); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -:is(.admonition):is(.quote, .cite) { - border-color: #9e9e9e; -} - -:is(.quote, .cite) > :is(.admonition-title, summary) { - background-color: rgba(158, 158, 158, 0.1); -} -:is(.quote, .cite) > :is(.admonition-title, summary)::before { - background-color: #9e9e9e; - mask-image: var(--md-admonition-icon--quote); - -webkit-mask-image: var(--md-admonition-icon--quote); - mask-repeat: no-repeat; - -webkit-mask-repeat: no-repeat; - mask-size: contain; - -webkit-mask-repeat: no-repeat; -} - -.navy :is(.admonition) { - background-color: var(--sidebar-bg); -} - -.ayu :is(.admonition), .coal :is(.admonition) { - background-color: var(--theme-hover); -} - -.rust :is(.admonition) { - background-color: var(--sidebar-bg); - color: 
var(--sidebar-fg); -} -.rust .admonition-anchor-link:link, .rust .admonition-anchor-link:visited { - color: var(--sidebar-fg); -} diff --git a/guide/src/SUMMARY.md b/guide/src/SUMMARY.md deleted file mode 100644 index 0ac1185d..00000000 --- a/guide/src/SUMMARY.md +++ /dev/null @@ -1,22 +0,0 @@ -# Summary - -# User Guide - -- [Introduction](./introduction.md) -- [Getting Started](./getting_started.md) -- [Datasets](./datasets/README.md) - - [Sensor](./datasets/sensor.md) - - [Lidar](./datasets/lidar.md) - - [Motion Forecasting](./datasets/motion_forecasting.md) - - [Map Change Detection (TbV)](./datasets/map_change_detection.md) -- [Supported Tasks](./tasks/README.md) - - [3D Object Detection](./tasks/3d_object_detection.md) - - [3D Scene Flow](./tasks/3d_scene_flow.md) - - [Motion Forecasting](./tasks/motion_forecasting.md) - -# API Reference Guide -- [HD Maps](./api/hd_maps.md) - -# Miscellaneous -- [Testing](./testing.md) -- [Contributing](./contributing.md) \ No newline at end of file diff --git a/guide/src/api/README.md b/guide/src/api/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/guide/src/contributing.md b/guide/src/contributing.md deleted file mode 100644 index 98bacd0e..00000000 --- a/guide/src/contributing.md +++ /dev/null @@ -1,4 +0,0 @@ -# Contributing - -Have a cool feature you'd like to add? Found an unhandled corner case? The Argoverse team welcomes contributions from -the open source community - please open a PR using the following [template](.github/pull_request_template.md)! diff --git a/guide/src/datasets/README.md b/guide/src/datasets/README.md deleted file mode 100644 index 0f376662..00000000 --- a/guide/src/datasets/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Datasets - -The Argoverse 2 API supports multiple datasets spanning two separate publications. - -1. [Argoverse 2](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/file/4734ba6f3de83d861c3176a6273cac6d-Paper-round2.pdf) - - - Sensor - - Lidar - - Motion Forecasting - -2. [Trust, but Verify](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/file/6f4922f45568161a8cdf4ad2299f6d23-Paper-round2.pdf) - - - Map Change Detection diff --git a/guide/src/getting_started.md b/guide/src/getting_started.md deleted file mode 100644 index d6644e31..00000000 --- a/guide/src/getting_started.md +++ /dev/null @@ -1,121 +0,0 @@ -# Getting Started - -## Table of Contents - - - -## Overview - -In this section, we outline the following: - -1. Installing the supporting API, `av2`, for the Argoverse 2 and TbV family of datasets. -2. Downloading the datasets to your local machine or server. - - -## Setup - -We _highly_ recommend using `conda` with the `conda-forge` channel for package management. - -### Install `conda` - -You will need to install `conda` on your machine. We recommend to install the `conda-forge` version of `conda` found at [https://github.com/conda-forge/miniforge#install](). You may need to run a post-install step to initialize `conda`: - -```terminal -$(which conda) init $SHELL -``` - -~~~admonish note collapsible=true -You may need to run a post-install step to initialize `conda`: - -```terminal -$(which conda) init $SHELL -``` - -If `conda` is not found, you will need to add the binary to your `PATH` environment variable. -~~~ - -### Install `av2` - -In your terminal emulator run, - -```bash -bash install.sh -``` - -which will install _all_ of the necessary dependencies in a conda environment named `av2`. 
- -To activate your environment (i.e., update your system paths), run: - -```bash -conda activate av2 -``` - -## Downloading the data - -Our datasets are available for download from [AWS S3](https://aws.amazon.com/s3/). - -For the best experience, we highly recommend using the open-source [s5cmd](https://github.com/peak/s5cmd) tool to transfer the data to your local filesystem. Please note that an AWS account is not required to download the datasets. - -```admonish note collapsible=true -Additional info can be found at [https://aws.amazon.com/blogs/opensource/parallelizing-s3-workloads-s5cmd/](). -``` - -## Installing `s5cmd` - -### Conda Installation (Recommended) - -The easiest way to install `s5cmd` is through `conda` using the `conda-forge` channel: - -```terminal -conda install s5cmd -c conda-forge -``` - -### Manual Installation - -`s5cmd` can also be installed with the following script: - -```bash -#!/usr/bin/env bash - -export INSTALL_DIR=$HOME/.local/bin -export PATH=$PATH:$INSTALL_DIR -export S5CMD_URI=https://github.com/peak/s5cmd/releases/download/v1.4.0/s5cmd_1.4.0_$(uname | sed 's/Darwin/macOS/g')-64bit.tar.gz - -mkdir -p $INSTALL_DIR -curl -sL $S5CMD_URI | tar -C $INSTALL_DIR -xvzf - s5cmd -``` - -Note that it will install `s5cmd` in your local bin directory. You can always change the path if you prefer installing it in another directory. - -# Download the Datasets - -Run the following command to download the one or more of the datasets: - -```bash -#!/usr/bin/env bash - -# Dataset URIs -# s3://argoverse/av2/sensor/ -# s3://argoverse/av2/lidar/ -# s3://argoverse/av2/motion-forecasting/ -# s3://argoverse/av2/tbv/ - -export DATASET_NAME="sensor" # sensor, lidar, motion_forecasting or tbv. -export TARGET_DIR="$HOME/data/datasets" # Target directory on your machine. - -s5cmd --no-sign-request cp "s3://argoverse/av2/$DATASET_NAME/*" $TARGET_DIR -``` - -The command will all data for `$DATASET_NAME` to `$TARGET_DIR`. Given the size of the dataset, it might take a couple of hours depending on the network connectivity. - -When the download is finished, the dataset is ready to use! - -## FAQ - -> Why manage dependencies in `conda` instead of `pip`? - -`conda` enables package management outside of the `python` ecosystem. This enables us to specify all necessary dependencies in `environment.yml`. Further, gpu-based packages (e.g., `torch`) are handled better through `conda`. - -> Why `conda-forge`? - -`conda-forge` is a community-driven channel of conda recipes. It includes a large number of packages which can all be properly tracked in the `conda` resolver allowing for consistent environments without conflicts. diff --git a/guide/src/getting_started/README.md b/guide/src/getting_started/README.md deleted file mode 100644 index bad55622..00000000 --- a/guide/src/getting_started/README.md +++ /dev/null @@ -1 +0,0 @@ -# Getting Started diff --git a/guide/src/introduction.md b/guide/src/introduction.md deleted file mode 100644 index 8b4a7031..00000000 --- a/guide/src/introduction.md +++ /dev/null @@ -1,18 +0,0 @@ -# Introduction - -

- -

- -## Welcome - -Welcome to the Argoverse User Guide! This guide is intended to help you answer the following questions: - -1. How do I setup `av2-api`? -2. Where do I find the Argoverse 2 data? -2. Which datasets are best suited for my research? -3. Which tasks are natively supported in `av2-api`? - -## Getting Help - -We value your feedback. If you have any comments, suggestions, or issues please reach out to us on [Github](https://github.com/argoverse/av2-api/issues). diff --git a/guide/src/tasks/3d_object_detection.md b/guide/src/tasks/3d_object_detection.md deleted file mode 100644 index 03a7d0cf..00000000 --- a/guide/src/tasks/3d_object_detection.md +++ /dev/null @@ -1,207 +0,0 @@ -# 3D Object Detection - -## Table of Contents - - - -## Overview - -The Argoverse 3D Object Detection task differentiates itself with its **26** category taxonomy and **long-range** (150 m) detection evaluation. We detail the task, metrics, evaluation protocol, and detailed object taxonomy information below. - - -## Task Definition - -For a unique tuple, `(log_id, timestamp_ns)`, produce a _ranked_ set of predictions $\mathcal{P}$ that describe an object's location, size, and orientation in the 3D scene: - -$$ -\begin{align} - \mathcal{P} &= \left\{ x^{i}_{\text{ego}}, y^{i}_{\text{ego}}, z^{i}_{\text{ego}}, l^{i}_{\text{obj}}, w^{i}_{\text{obj}}, h^{i}_{\text{obj}}, \theta^{i}_{\text{obj}}, c^i, o^i \right\}_{i=1}^{N} \quad \text{where}, \\\\ - - x^{i}_{\text{ego}} &: \text{Location along the x-axis in the ego-vehicle reference frame.} \\ - y^{i}_{\text{ego}} &: \text{Location along the y-axis in the ego-vehicle reference frame.} \\ - z^{i}_{\text{ego}} &: \text{Location along the z-axis in the ego-vehicle reference frame.} \\ - l^{i}_{\text{obj}} &: \text{Extent along the x-axis in the object reference frame.} \\ - w^{i}_{\text{obj}} &: \text{Extent along the y-axis in the object reference frame.} \\ - h^{i}_{\text{obj}} &: \text{Extent along the z-axis in the object reference frame.} \\ - \theta^{i}_{\text{obj}} &: \text{Counter clockwise rotation from the x-axis in the object reference frame.} \\ - c^{i} &: \text{Predicted likelihood.} \\ - o^{i} &: \text{Categorical object label.} -\end{align} -$$ - -# 3D Object Detection Taxonomy - -1. `REGULAR_VEHICLE`: -Any conventionally sized passenger vehicle used for the transportation of people and cargo. This includes Cars, vans, pickup trucks, SUVs, etc. - -2. ``PEDESTRIAN``: -Person that is not driving or riding in/on a vehicle. They can be walking, standing, sitting, prone, etc. - -3. `BOLLARD`: -Bollards are short, sturdy posts installed in the roadway or sidewalk to control the flow of traffic. These may be temporary or permanent and are sometimes decorative. - -4. `CONSTRUCTION_CONE`: -Movable traffic cone that is used to alert drivers to a hazard. These will typically be orange and white striped and may or may not have a blinking light attached to the top. - -5. `CONSTRUCTION_BARREL`: -Construction Barrel is a movable traffic barrel that is used to alert drivers to a hazard. These will typically be orange and white striped and may or may not have a blinking light attached to the top. - -6. `STOP_SIGN`: -Red octagonal traffic sign displaying the word STOP used to notify drivers that they must come to a complete stop and make sure no other road users are coming before proceeding. - -7. `BICYCLE`: -Non-motorized vehicle that typically has two wheels and is propelled by human power pushing pedals in a circular motion. - -8. 
`LARGE_VEHICLE`: -Large motorized vehicles (four wheels or more) which do not fit into any more specific subclass. Examples include extended passenger vans, fire trucks, RVs, etc. - -9. `WHEELED_DEVICE`: -Objects involved in the transportation of a person and do not fit a more specific class. Examples range from skateboards, non-motorized scooters, segways, to golf-carts. - -10. `BUS`: -Standard city buses designed to carry a large number of people. - -11. `BOX_TRUCK`: -Chassis cab truck with an enclosed cube shaped cargo area. It should be noted that the cargo area is rigidly attached to the cab, and they do not articulate. - -12. `SIGN`: -Official road signs placed by the Department of Transportation (DOT signs) which are of interest to us. This includes yield signs, speed limit signs, directional control signs, construction signs, and other signs that provide required traffic control information. Note that Stop Sign is captured separately and informative signs such as street signs, parking signs, bus stop signs, etc. are not included in this class. - -13. `TRUCK`: -Vehicles that are clearly defined as a truck but does not fit into the subclasses of Box Truck or Truck Cab. Examples include common delivery vehicles (UPS, FedEx), mail trucks, garbage trucks, utility trucks, ambulances, dump trucks, etc. - -14. `MOTORCYCLE`: -Motorized vehicle with two wheels where the rider straddles the engine. These are capable of high speeds similar to a car. - -15. `BICYCLIST`: -Person actively riding a bicycle, non-pedaling passengers included. - -16. `VEHICULAR_TRAILER`: -Non-motorized, wheeled vehicle towed behind a motorized vehicle. - -17. `TRUCK_CAB`: -Heavy truck commonly known as “Semi cab”, “Tractor”, or “Lorry”. This refers to only the front of part of an articulated tractor trailer. - -18. `MOTORCYCLIST`: -Person actively riding a motorcycle or a moped, including passengers. - -19. `DOG`: -Any member of the canine family. - -20. `SCHOOL_BUS`: -Bus that primarily holds school children (typically yellow) and can control the flow of traffic via the use of an articulating stop sign and loading/unloading flasher lights. - -21. `WHEELED_RIDER`: -Person actively riding or being carried by a wheeled device. - -22. `STROLLER`: -Push-cart with wheels meant to hold a baby or toddler. - -23. `ARTICULATED_BUS`: -Articulated buses perform the same function as a standard city bus, but are able to bend (articulate) towards the center. These will also have a third set of wheels not present on a typical bus. - -24. `MESSAGE_BOARD_TRAILER`: -Trailer carrying a large, mounted, electronic sign to display messages. Often found around construction sites or large events. - -25. `MOBILE_PEDESTRIAN_SIGN`: -Movable sign designating an area where pedestrians may cross the road. - -26. `WHEELCHAIR`: -Chair fitted with wheels for use as a means of transport by a person who is unable to walk as a result of illness, injury, or disability. This includes both motorized and non-motorized wheelchairs as well as low-speed seated scooters not intended for use on the roadway. - -## Metrics - -All of our reported metrics require _assigning_ predictions to ground truth annotations written as $a_{\text{pd}, \text{gt}}$ to compute true positives (TP), false positives (FP), and false negatives (FN). - -Formally, we define a _true positive_ as: - -$$ -\text{TP}_{\text{pd}, \text{gt}} = \left\{ a_{\text{pd}, \text{gt}} : \lVert v_{\text{pd}} - v_{\text{gt}} \rVert_2 \leq d \right\}, -$$ - -where $d$ is a distance threshold in meters. 
- -```admonish important -Duplicate assignments are considered _false positives_. -``` - -### Average Precision - -Average precision measures the area underneath the precision / recall curve across different true positive distance thresholds. - -$$ -\begin{align} - \text{AP} &= \frac{1}{100}\underset{d \in \mathcal{D}}{\sum}\underset{r\in\mathcal{R}}{\sum}\text{p}_{\text{interp}}(r) \quad \text{where} \\ - \quad \mathcal{D} &= \left\{ 0.5 \text{ m}, 1.0 \text{ m}, 2.0 \text{ m}, 4.0 \text{ m} \right\} \\ - \quad \mathcal{R} &= \left\{ 0.01, 0.02, \dots, 1.00 \right\} -\end{align} -$$ - -### True Positive Metrics - -All true positive metrics are at a $2 \text{ m}$ threshold. - -#### Average Translation Error (ATE) - -ATE measures the distance between true positive assignments. - -$$ -\begin{align} - \text{ATE} = \lVert t_{\text{pd}}-t_{\text{gt}} \rVert_2 \quad \text{where} \quad t_{\text{pd}}\in\mathbb{R}^3,t_{\text{gt}}\in\mathbb{R}^3. -\end{align} -$$ - -#### Average Scale Error (ASE) - -ASE measures the shape misalignent for true positive assignments. - -$$ -\begin{align} - \text{ASE} = 1 - \underset{d\in\mathcal{D}}{\prod}\frac{\min(d_{\text{pd}},d_{\text{gt}})}{\max(d_{\text{pd}},d_\text{gt})}. -\end{align} -$$ - -#### Average Orientation Error (AOE) - -AOE measures the minimum angle between true positive assignments. - -$$ -\begin{align} - \text{AOE} = |\theta_{\text{pd}}-\theta_{\text{gt}}| \quad \text{where} \quad \theta_{\text{pd}}\in[0,\pi) \text{ and } \theta_{\text{gt}}\in[0,\pi). -\end{align} -$$ - -### Composite Detection Score (CDS) - -CDS measures the _overall_ performance across all previously introduced metrics. - -$$ -\begin{align} - \text{CDS}&= \text{AP} \cdot \underset{x\in\mathcal{X}}{\sum}{ 1-x }, \\ - \mathcal{X}&=\{\text{ATE}_{\text{unit}},\text{ASE}_{\text{unit}},\text{AOE}_{\text{unit}}\}, -\end{align} -$$ - -where $\{\text{ATE}_{\text{unit}},\text{ASE}_{\text{unit}},\text{AOE}_{\text{unit}}\}$ are the _normalized_ true positive errors. - -```admonish note -$\text{ATE}$, $\text{ASE}$, and $\text{AOE}$ are bounded by $2 \text{ m}$, $1$, and $\pi$. -``` - -```admonish important -CDS is the **ranking** metric. -``` - -# Evaluation - -The 3D object detection evaluation consists of the following steps: - -1. Partition the predictions and ground truth objects by a unique id, `(log_id: str, timestamp_ns: uint64)`, which corresponds to a single sweep. - -2. For the predictions and ground truth objects associated with a single sweep, greedily assign the predictions to the ground truth objects in _descending_ order by _likelihood_. - -3. Compute the true positive, false positive, and false negatives. - -4. Compute the true positive metrics. - -2. True positive, false positive, and false negative computation. diff --git a/guide/src/tasks/3d_scene_flow.md b/guide/src/tasks/3d_scene_flow.md deleted file mode 100644 index 09b46c0b..00000000 --- a/guide/src/tasks/3d_scene_flow.md +++ /dev/null @@ -1,77 +0,0 @@ -# 3D Scene Flow - -## Table of Contents - - - -## Overview - -In Argoverse 2 the LiDAR sensor samples the geometry around the AV every 0.1s, producing a set of 3D points called a "sweep". If the world were static, two successive sweeps would represent two different samples of the same geometry. In a non-static world, however, each point measured in the first sweep could have moved before being sampled again. 3D Scene Flow estimation aims to find these motion vectors that relate two successive LiDAR sweeps. 
- -## Labeling Procedure - -Since we do not have any direct way of measuring the motion of every point in the scene, we leverage object-level tracking labels to generate piecewise-rigid flow labels. We have a set of oriented bounding boxes for each sweep, one for each annotated object. For each bounding box, if the second sweep contains a corresponding bounding box, we can extract the rigid transformation that transforms points in the first box to the second. For each point inside the bounding box, we assign it the flow vector corresponding to that rigid motion. Points not belonging to any bounding box are assigned the ego-motion as flow. For objects that only appear in one frame, we cannot compute the ground truth flow, so they are ignored for evaluation purposes but included in the input. - -## Input - -- Sweep 1: (N x 4) The XYZ positions of each point in the first sweep as well as the intensity of the return. -- Sweep 2: (M x 4) The same but for the second sweep. -- Ego Motion: The pose of the autonomous vehicle in the second frame relative to the first. -- Ground annotations: For each sweep, we give a binary classification indicating if the point belongs to the ground as determined by the ground height map. - -## Output - -The purpose of the task is to produce two outputs. As described above, the main output is an N x 3 array of motion. However, we also ask that contestants submit a binary segmentation of the scene into "Dynamic" and "Static". This prediction should label points as "Dynamic" if they move faster than 0.5m/s *in the world frame*. - -# Getting Started - -## Data Loading - -Once the Sensor Dataset is set up (see [these instructions](https://github.com/argoverse/av2-api/blob/main/src/av2/datasets/sensor/README.md)), you can use the `SceneFlowDataloader` to load pairs of sweeps along with all the auxiliary information (poses and ground annotations) and flow annotations. The data loader can be found in `av2.torch.data_loaders.scene_flow`, and documentation can be found in the [source code](https://github.com/argoverse/av2-api/blob/main/src/av2/torch/data_loaders/scene_flow.py). - -## Evaluation Point Subset - -The contest only asks for flow and dynamic segmentation predictions on a subset of the input points. Specifically, we are only interested in points that do not belong to the ground and are within a 100m x 100m box centered on the origin. We offer a utility function `compute_eval_point_mask` in `av2.evaluation.scene_flow.utils` to compute this mask, but DO NOT USE THIS TO CREATE SUBMISSION FILES. To ensure consistency, we have pre-computed the masks for submission, which can be loaded using `get_eval_point_mask`. - - -# Contest Submission Format - -The evaluation expects a zip archive of [Apache Feather](https://arrow.apache.org/docs/python/feather.html) files --- one for each example. The unzipped directory must have the format: - -```terminal -- / - - .feather - - .feather - - ... -- / -- ... -``` - -The evaluation is run on a subset of the test set. Use the utility function `get_eval_subset` to get the `SceneFlowDataloader` indices to submit. Each feather file should contain your flow predictions for the subset of points returned by `get_eval_mask` in the format: - -- `flow_tx_m` (float16): x-component of the flow (in meters) in the first sweeps' ego-vehicle reference frame. -- `flow_ty_m` (float16): y-component of the flow (in meters) in the first sweeps' ego-vehicle reference frame. 
-- `flow_tz_m` (float16): z-component of the flow (in meters) in the first sweeps' ego-vehicle reference frame. -- `is_dynamic` (bool): Predicted dynamic/static labels for each point. A point is considered dynamic if its ground truth flow has a $\ell^2$-norm greater than $0.05 \textit{ m}$ once ego-motion has been removed. - - -For example, the first log in the test set is `0c6e62d7-bdfa-3061-8d3d-03b13aa21f68`, and the first timestamp is `315971435999927221`, so there should be a folder and file in the archive of the form: `0c6e62d7-bdfa-3061-8d3d-03b13aa21f68/315971435999927221.feather`. That file should look like this: -```python - flow_tx_m flow_ty_m flow_tz_m -0 -0.699219 0.002869 0.020233 -1 -0.699219 0.002790 0.020493 -2 -0.699219 0.002357 0.020004 -3 -0.701172 0.001650 0.013390 -4 -0.699219 0.002552 0.020187 -... ... ... ... -68406 -0.703613 -0.001801 0.002373 -68407 -0.704102 -0.000905 0.002567 -68408 -0.704590 -0.001390 0.000397 -68409 -0.704102 -0.001608 0.002283 -68410 -0.704102 -0.001619 0.002207 -``` -The file `example_submission.py` contains a basic example of how to output the submission files. The script `make_submission_archive.py` will create the zip archive for you and validate the submission format. Then submit the outputted file to the competition leaderboard! - -# Local Evaluation - -Before evaluating on the _test_ set, you will want to evaluate your model on the _validation_ set. To do this, first run `make_mask_files.py` and `make_annotation_files.py` to create files containing the minimum ground truth flow information needed to run the evaluation. Then, once your output is saved in the feather files described above, run `eval.py` to compute all leaderboard metrics. diff --git a/guide/src/tasks/README.md b/guide/src/tasks/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/guide/src/tasks/motion_forecasting.md b/guide/src/tasks/motion_forecasting.md deleted file mode 100644 index 376ac236..00000000 --- a/guide/src/tasks/motion_forecasting.md +++ /dev/null @@ -1,74 +0,0 @@ -# Motion Forecasting - -
- - - -
- -## Table of Contents - - - -## Overview - -The Argoverse 2 motion forecasting dataset consists of 250,000 scenarios, collected from 6 cities spanning multiple seasons. - -Each scenario is specifically designed to maximize interactions relevant to the ego-vehicle. This naturally results in the inclusion of actor-dense scenes featuring a range of vehicle and non-vehicle actor types. At the time of release, AV2 provides the largest object taxonomy, in addition to the broadest mapped area of any motion forecasting dataset released so far. - -## Download - -The latest version of the AV2 motion forecasting dataset can be downloaded from the Argoverse [website](https://www.argoverse.org/av2.html). - -## Scenarios and Tracks - -Each scenario is 11s long and consists of a collection of actor histories, which are represented as "tracks". For each scenario, we provide the following high-level attributes: - -- `scenario_id`: Unique ID associated with this scenario. -- `timestamps_ns`: All timestamps associated with this scenario. -- `tracks`: All tracks associated with this scenario. -- `focal_track_id`: The track ID associated with the focal agent of the scenario. -- `city_name`: The name of the city associated with this scenario. - -Each track is further associated with the following attributes: - -- `track_id`: Unique ID associated with this track -- `object_states`: States for each timestep where the track object had a valid observation. -- `object_type`: Inferred type for the track object. -- `category`: Assigned category for track - used as an indicator for prediction requirements and data quality. - -Track object states bundle all information associated with a particular actor at a fixed point in time: - -- `observed`: Boolean indicating if this object state falls in the observed segment of the scenario. -- `timestep`: Time step corresponding to this object state [0, num_scenario_timesteps). -- `position`: (x, y) Coordinates of center of object bounding box. -- `heading`: Heading associated with object bounding box (in radians, defined w.r.t the map coordinate frame). -- `velocity`: (x, y) Instantaneous velocity associated with the object (in m/s). - -Each track is assigned one of the following labels, which dictate scoring behavior in the Argoverse challenges: - -- `TRACK_FRAGMENT`: Lower quality track that may only contain a few timestamps of observations. -- `UNSCORED_TRACK`: Unscored track used for contextual input. -- `SCORED_TRACK`: High-quality tracks relevant to the AV - scored in the multi-agent prediction challenge. -- `FOCAL_TRACK`: The primary track of interest in a given scenario - scored in the single-agent prediction challenge. - -Each track is also assigned one of the following labels, as part of the 10-class object taxonomy: - -- Dynamic - - `VEHICLE` - - `PEDESTRIAN` - - `MOTORCYCLIST` - - `CYCLIST` - - `BUS` -- Static - - `STATIC` - - `BACKGROUND` - - `CONSTRUCTION` - - `RIDERLESS_BICYCLE` -- `UNKNOWN` - -For more additional details regarding the data schema, please refer [here](data_schema.py). - -## Visualization - -Motion forecasting scenarios can be visualized using the viz [`script`](../../../../tutorials/generate_forecasting_scenario_visualizations.py) or by calling the viz [`library`](viz/scenario_visualization.py#L48) directly. 
\ No newline at end of file diff --git a/guide/src/testing.md b/guide/src/testing.md deleted file mode 100644 index 5eab67aa..00000000 --- a/guide/src/testing.md +++ /dev/null @@ -1,9 +0,0 @@ -# Testing - -All incoming pull requests are tested using [nox](https://nox.thea.codes/en/stable/) as -part of the CI process. This ensures that the latest version of the API is always stable on all supported platforms. You -can run the full suite of automated checks and tests locally using the following command: - -```bash -nox -r -``` diff --git a/guide/src/tutorials/3d_object_detection.md b/guide/src/tutorials/3d_object_detection.md deleted file mode 100644 index de9558b7..00000000 --- a/guide/src/tutorials/3d_object_detection.md +++ /dev/null @@ -1,3 +0,0 @@ -```python -{{#include ../../../tutorials/3d_object_detection.py}} -``` diff --git a/guide/src/tutorials/README.md b/guide/src/tutorials/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/guide/src/tutorials/detection_data_loader.md b/guide/src/tutorials/detection_data_loader.md deleted file mode 100644 index 81c8590f..00000000 --- a/guide/src/tutorials/detection_data_loader.md +++ /dev/null @@ -1 +0,0 @@ -# Tutorials diff --git a/noxfile.py b/noxfile.py index 762c30bd..e2a40335 100644 --- a/noxfile.py +++ b/noxfile.py @@ -10,25 +10,9 @@ from nox import Session from nox.virtualenv import CondaEnv -PYTHON: Final = ["3.8", "3.9", "3.10"] -CI_ENV: Final = ( - "black[jupyter]", - "isort", - "flake8", - "flake8-annotations", - "flake8-black", - "flake8-bugbear", - "flake8-docstrings", - "flake8-import-order", - "darglint", - "mypy", - "types-pyyaml", - "pytest", - "pytest-benchmark", - "pytest-cov", -) - -nox.options.sessions = ["ci"] +PYTHON: Final[List[str]] = ["3.8", "3.9", "3.10"] + +nox.options.sessions = ["black", "isort", "lint", "mypy", "pytest"] def _setup(session: Session) -> None: @@ -61,16 +45,76 @@ def _setup(session: Session) -> None: @nox.session(python=PYTHON) -def ci(session: Session) -> None: - """Run CI against `av2`. +def black(session: Session) -> None: + """Run `black` against `av2`. Args: session: `nox` session. """ + env = ["black[jupyter]"] _setup(session) - session.install(*CI_ENV) + session.install(*env) session.run("black", ".") + + +@nox.session(python=PYTHON) +def isort(session: Session) -> None: + """Run `isort` against `av2`. + + Args: + session: `nox` session. + """ + env = ["isort"] + _setup(session) + session.install(*env) session.run("isort", ".") + + +@nox.session(python=PYTHON) +def lint(session: Session) -> None: + """Lint using flake8.""" + env = [ + "flake8", + "flake8-annotations", + "flake8-black", + "flake8-bugbear", + "flake8-docstrings", + "flake8-import-order", + "darglint", + ] + _setup(session) + session.install(*env) session.run("flake8", ".") + + +@nox.session(python=PYTHON) +def mypy(session: Session) -> None: + """Run `mypy` against `av2`. + + Args: + session: `nox` session. + """ + env = [ + "mypy", + "types-pyyaml", + ] + _setup(session) + session.install(*env) session.run("mypy", ".") + + +@nox.session(python=PYTHON) +def pytest(session: Session) -> None: + """Run `pytest` against `av2`. + + Args: + session: `nox` session. 
+ """ + env = [ + "pytest", + "pytest-benchmark", + "pytest-cov", + ] + _setup(session) + session.install(*env) session.run("pytest", "tests", "--cov", "src/av2") diff --git a/guide/src/datasets/lidar.md b/src/av2/datasets/lidar/README.md similarity index 92% rename from guide/src/datasets/lidar.md rename to src/av2/datasets/lidar/README.md index a5e1bb12..3e590585 100644 --- a/guide/src/datasets/lidar.md +++ b/src/av2/datasets/lidar/README.md @@ -1,17 +1,12 @@ # Argoverse 2 Lidar Dataset Overview

- - - - + + + +

-## Table of Contents - - - -## Overview The Argoverse 2 Lidar Dataset is intended to support research into self-supervised learning in the lidar domain as well as point cloud forecasting. The AV2 Lidar Dataset is mined with the same criteria as the Forecasting Dataset to ensure that each scene is interesting. While the Lidar Dataset does not have 3D object annotations, each scenario carries an HD map with rich, 3D information about the scene. @@ -75,4 +70,4 @@ We randomly partition the dataset into the following splits: - Train (16,000 logs) - Validation (2,000 logs) -- Test (2,000 logs) \ No newline at end of file +- Test (2,000 logs) diff --git a/guide/src/datasets/motion_forecasting.md b/src/av2/datasets/motion_forecasting/README.md similarity index 84% rename from guide/src/datasets/motion_forecasting.md rename to src/av2/datasets/motion_forecasting/README.md index f124999a..5ddc7058 100644 --- a/guide/src/datasets/motion_forecasting.md +++ b/src/av2/datasets/motion_forecasting/README.md @@ -1,18 +1,12 @@ -# Overview +# Argoverse 2 Motion Forecasting -
- - - -
- -## Table of Contents - - +![](https://user-images.githubusercontent.com/29715011/158486284-1a0df794-ee0a-4ae6-a320-0dd0d1daad06.gif) +![](https://user-images.githubusercontent.com/29715011/158486286-e734e654-b879-4994-a129-9957cc591af4.gif) +![](https://user-images.githubusercontent.com/29715011/158486288-5e7c0971-de0c-4ff5-bea7-76f7922dd1e0.gif) ## Overview -The Argoverse 2 motion forecasting dataset consists of 250,000 scenarios, collected from 6 cities spanning multiple seasons. +The Argoverse 2.0 motion forecasting dataset consists of 250,000 scenarios, collected from 6 cities spanning multiple seasons. Each scenario is specifically designed to maximize interactions relevant to the ego-vehicle. This naturally results in the inclusion of actor-dense scenes featuring a range of vehicle and non-vehicle actor types. At the time of release, AV2 provides the largest object taxonomy, in addition to the broadest mapped area of any motion forecasting dataset released so far. diff --git a/guide/src/datasets/sensor.md b/src/av2/datasets/sensor/README.md similarity index 96% rename from guide/src/datasets/sensor.md rename to src/av2/datasets/sensor/README.md index fbe3dbaa..f3873de8 100644 --- a/guide/src/datasets/sensor.md +++ b/src/av2/datasets/sensor/README.md @@ -1,17 +1,6 @@ -# Sensor Dataset +# Argoverse 2 Sensor Dataset Overview -
- - - - -
- -## Table of Contents - - - -## Overview +## Dataset Size The Argoverse 2 Sensor Dataset is the successor to the Argoverse 1 3D Tracking Dataset. AV2 is larger, with 1,000 scenes totalling 4.2 hours of driving data, up from 113 scenes in Argoverse 1. diff --git a/guide/src/datasets/map_change_detection.md b/src/av2/datasets/tbv/README.md similarity index 97% rename from guide/src/datasets/map_change_detection.md rename to src/av2/datasets/tbv/README.md index 6c9023d3..71cbb989 100644 --- a/guide/src/datasets/map_change_detection.md +++ b/src/av2/datasets/tbv/README.md @@ -1,23 +1,18 @@ -# Overview +# Trust, but Verify (TbV) Dataset Overview
-## Table of Contents
-
-
-
-## Overview
+## Dataset Size
 The Trust, but Verify (TbV) Dataset consists of 1043 vehicle logs. Each vehicle log is, on average, 54 seconds in duration, with 536 LiDAR sweeps and 1073 images from each of the 7 cameras (7512 images per log). Some logs are as short as 4 seconds, and others are up to 117 seconds in duration. The total dataset amounts to 15.54 hours of driving data, or 922 GB of data in its extracted form. There are 7.84 million images in the dataset (7,837,614 exactly) and 559,440 LiDAR sweeps in total.
-
 ## Downloading TbV
 TbV is available for download in two forms: either zipped up as 21 tar.gz files, or in extracted, unzipped form (without tar archives). Downloading either will produce the same result (the underlying log data is identical).
diff --git a/src/av2/evaluation/detection/SUBMISSION_FORMAT.md b/src/av2/evaluation/detection/SUBMISSION_FORMAT.md
new file mode 100644
index 00000000..38312295
--- /dev/null
+++ b/src/av2/evaluation/detection/SUBMISSION_FORMAT.md
@@ -0,0 +1,46 @@
+# 3D Object Detection Submission Format
+
+The evaluation expects the following fields within a `pandas.DataFrame`:
+
+- `tx_m`: x-component of the object translation in the egovehicle reference frame.
+- `ty_m`: y-component of the object translation in the egovehicle reference frame.
+- `tz_m`: z-component of the object translation in the egovehicle reference frame.
+- `length_m`: Object extent along the x-axis in meters.
+- `width_m`: Object extent along the y-axis in meters.
+- `height_m`: Object extent along the z-axis in meters.
+- `qw`: Real quaternion coefficient.
+- `qx`: First quaternion coefficient.
+- `qy`: Second quaternion coefficient.
+- `qz`: Third quaternion coefficient.
+- `score`: Object confidence.
+- `log_id`: Log id associated with the detection.
+- `timestamp_ns`: Timestamp associated with the detection.
+- `category`: Object category.
+
+An example looks like this:
+
+```python
+# These detections are only for example purposes.
+
+display(detections)  # `detections` is of type `pd.DataFrame`.
+                tx_m       ty_m      tz_m  length_m   width_m  height_m        qw   qx   qy        qz     score                                log_id        timestamp_ns         category
+0        -162.932968   1.720428  0.039064  1.596262  0.772320  1.153996  0.125843  0.0  0.0  0.992050  0.127634  b0116f1c-f88f-3c09-b4bf-fc3c8ebeda56  315968193659921000       WHEELCHAIR
+1        -120.362213  19.875946 -0.382618  1.441901  0.593825  1.199819  0.802836  0.0  0.0  0.596200  0.126565  b0116f1c-f88f-3c09-b4bf-fc3c8ebeda56  315968193659921000          BICYCLE
+...
+14000000   10.182907  29.489899  0.662969  9.166531  1.761454  1.615999  0.023469  0.0  0.0 -0.999725  0.322177  b2d9d8a5-847b-3c3b-aed1-c414319d20af  315978610360111000  REGULAR_VEHICLE
+
+detections.columns
+Index(['tx_m', 'ty_m', 'tz_m', 'length_m', 'width_m', 'height_m', 'qw', 'qx',
+       'qy', 'qz', 'score', 'log_id', 'timestamp_ns', 'category'],
+      dtype='object')
+```
+
+We need to export the above dataframe for submission. This can be done by:
+
+```python
+import pandas as pd
+
+detections.to_feather("detections.feather")
+```
+
+Lastly, submit this file to the competition leaderboard.
\ No newline at end of file
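To make the expected schema concrete, here is a minimal sketch (not part of the original submission guide) that builds a one-row detections `DataFrame` with every required column and exports it. All numeric values are placeholders; only the `log_id`, `timestamp_ns`, and `category` values are copied from the example table above.

```python
import pandas as pd

# Minimal sketch of a single detection row. The numeric values are placeholders;
# log_id, timestamp_ns, and category are copied from the example table above.
detections = pd.DataFrame(
    [
        {
            "tx_m": 10.18, "ty_m": 29.49, "tz_m": 0.66,
            "length_m": 9.17, "width_m": 1.76, "height_m": 1.62,
            "qw": 1.0, "qx": 0.0, "qy": 0.0, "qz": 0.0,
            "score": 0.32,
            "log_id": "b2d9d8a5-847b-3c3b-aed1-c414319d20af",
            "timestamp_ns": 315978610360111000,
            "category": "REGULAR_VEHICLE",
        }
    ]
)

# Export for submission (writing Feather requires pyarrow to be installed).
detections.to_feather("detections.feather")
```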
diff --git a/src/av2/evaluation/scene_flow/SUBMISSION_FORMAT.md b/src/av2/evaluation/scene_flow/SUBMISSION_FORMAT.md
new file mode 100644
index 00000000..20fbe109
--- /dev/null
+++ b/src/av2/evaluation/scene_flow/SUBMISSION_FORMAT.md
@@ -0,0 +1,42 @@
+# 3D Scene Flow Submission Format
+
+The evaluation expects a zip archive of [Apache Feather](https://arrow.apache.org/docs/python/feather.html) files --- one for each example. The unzipped directory must have the format:
+
+```terminal
+- <test_log_id_1>/
+  - <timestamp_ns_1>.feather
+  - <timestamp_ns_2>.feather
+  - ...
+- <test_log_id_2>/
+- ...
+```
+
+The evaluation is run on a subset of the test set. Use the utility function `get_eval_subset` to get the `SceneFlowDataloader` indices to submit. Each feather file should contain your flow predictions for the subset of points returned by `get_eval_mask` in the format:
+
+- `flow_tx_m` (float16): x-component of the flow (in meters) in the first sweep's ego-vehicle reference frame.
+- `flow_ty_m` (float16): y-component of the flow (in meters) in the first sweep's ego-vehicle reference frame.
+- `flow_tz_m` (float16): z-component of the flow (in meters) in the first sweep's ego-vehicle reference frame.
+- `is_dynamic` (bool): Predicted dynamic/static labels for each point. A point is considered dynamic if its ground truth flow has a $\ell^2$-norm greater than $0.05 \textit{ m}$ once ego-motion has been removed.
+
+For example, the first log in the test set is `0c6e62d7-bdfa-3061-8d3d-03b13aa21f68` and the first timestamp is `315971435999927221`, so there should be a folder and file in the archive of the form: `0c6e62d7-bdfa-3061-8d3d-03b13aa21f68/315971435999927221.feather`. That file should look like this:
+```python
+       flow_tx_m  flow_ty_m  flow_tz_m
+0      -0.699219   0.002869   0.020233
+1      -0.699219   0.002790   0.020493
+2      -0.699219   0.002357   0.020004
+3      -0.701172   0.001650   0.013390
+4      -0.699219   0.002552   0.020187
+...          ...        ...        ...
+68406  -0.703613  -0.001801   0.002373
+68407  -0.704102  -0.000905   0.002567
+68408  -0.704590  -0.001390   0.000397
+68409  -0.704102  -0.001608   0.002283
+68410  -0.704102  -0.001619   0.002207
+```
+The file `example_submission.py` contains a basic example of how to output the submission files. The script `make_submission_archive.py` will create the zip archive for you and validate the submission format. Then you can submit the resulting file to the competition leaderboard!
+
+# Local Evaluation
+
+Before evaluating on the _test_ set, you will want to evaluate your model on the _validation_ set. To do this, first run `make_annotation_files.py` to create a set of files containing the minimum ground truth flow information needed to run the evaluation. Then, once you have your output saved in the feather files described above, run `eval.py` to compute all of the leaderboard metrics.
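For illustration, the sketch below writes a single prediction file in the layout described above, using the example log id and timestamp from the text. The flow values and the point count are placeholders (in practice they would come from your model and the evaluation mask), and `make_submission_archive.py`, mentioned above, would package the output directory into the final zip.

```python
from pathlib import Path

import numpy as np
import pandas as pd

# Example log id and timestamp taken from the format description above.
log_id = "0c6e62d7-bdfa-3061-8d3d-03b13aa21f68"
timestamp_ns = 315971435999927221
num_points = 68411  # Placeholder; use the number of points selected by get_eval_mask.

rng = np.random.default_rng(0)
predictions = pd.DataFrame(
    {
        # Flow components are stored as float16, per the submission format.
        "flow_tx_m": rng.normal(scale=0.1, size=num_points).astype(np.float16),
        "flow_ty_m": rng.normal(scale=0.1, size=num_points).astype(np.float16),
        "flow_tz_m": rng.normal(scale=0.1, size=num_points).astype(np.float16),
        # Dynamic/static labels are stored as booleans.
        "is_dynamic": np.zeros(num_points, dtype=bool),
    }
)

# One file per example: <log_id>/<timestamp_ns>.feather inside the submission folder.
out_dir = Path("submission") / log_id
out_dir.mkdir(parents=True, exist_ok=True)
predictions.to_feather(out_dir / f"{timestamp_ns}.feather")
```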

diff --git a/guide/src/api/hd_maps.md b/src/av2/map/README.md
similarity index 75%
rename from guide/src/api/hd_maps.md
rename to src/av2/map/README.md
index 6995fb39..25a3f9b4 100644
--- a/guide/src/api/hd_maps.md
+++ b/src/av2/map/README.md
@@ -1,14 +1,16 @@
-# HD Maps

+# Maps for Argoverse 2.0
 ## Table of Contents
-
+- [Overview](#overview)
+- [Map Counts](#map-counts)
+- [Vector Map: Lane Graph and Lane Segments](#lane-segments)
+- [Vector Map: Drivable Area](#drivable-area)
+- [Vector Map: Pedestrian Crossings](#ped-crossings)
+- [Area of Local Maps](#area-of-local-maps)
+- [Raster Maps: Ground surface height](#ground-height)
+- [Training Online Map Inference Models](#training-online-map-inference-models)
 ## Overview
@@ -21,20 +23,20 @@ Each scenario in the three datasets described above shares the same HD map repre
 ## Map Counts
-Argoverse 2 offers a massive number of highly diverse HD maps:
+Argoverse 2.0 offers a massive number of highly diverse HD maps:
 - **Motion Forecasting Dataset**: ~250,000 vector maps.
 - **Sensor Dataset**: 1,000 vector maps and 1,000 ground height raster maps.
 - **LiDAR Dataset**: 20,000 vector maps.
 - **TbV Dataset**: 1,038 vector maps and 1,038 ground height raster maps.
-The core data structure that holds Argoverse 2 map data is the [`ArgoverseStaticMap`](map_api.py#280) class. Please refer to the [map tutorial notebook](../../../tutorials/map_tutorial.ipynb) for more examples of how to use the map API.
+The core data structure that holds Argoverse 2.0 map data is the [`ArgoverseStaticMap`](map_api.py#280) class. Please refer to the [map tutorial notebook](../../../tutorials/map_tutorial.ipynb) for more examples of how to use the map API.
 ## Vector Map: Lane Graph and Lane Segments
-The core feature of the HD map is the lane graph, consisting of a graph $G = (V, E)$, where $V$ are individual lane segments. In the [supplemental material](https://openreview.net/attachment?id=vKQGe36av4k&name=supplementary_material), we enumerate and define the attributes we provide for each lane segment. Unlike Argoverse 1, we provide the actual 3D lane boundaries, instead of only centerlines. However, our API provides code to quickly infer the centerlines at any desired sampling resolution. Polylines are quantized to $1 \text{ cm}$ resolution in the release.
+The core feature of the HD map is the lane graph, consisting of a graph G = (V, E), where V are individual lane segments. In the [supplemental material](https://openreview.net/attachment?id=vKQGe36av4k&name=supplementary_material), we enumerate and define the attributes we provide for each lane segment. Unlike Argoverse 1, we provide the actual 3D lane boundaries, instead of only centerlines. However, our API provides code to quickly infer the centerlines at any desired sampling resolution. Polylines are quantized to 1 cm resolution in the release.
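To make the map API referenced above concrete, here is a minimal, hedged sketch of loading one log's vector map. The `from_json` constructor and the `get_scenario_lane_segments` accessor reflect `map_api.py` at the time of writing, and the path below is purely illustrative; consult the map tutorial notebook if the exact names or file layout differ.

```python
from pathlib import Path

from av2.map.map_api import ArgoverseStaticMap

# Illustrative path only: each log stores its vector map as a single JSON file
# (named with a "log_map_archive_" prefix) inside that log's map/ directory.
static_map_path = Path("<log_dir>/map/log_map_archive_<log_id>.json")

# Load the vector map for this log.
avm = ArgoverseStaticMap.from_json(static_map_path)

# Lane segments carry the lane graph described above.
lane_segments = avm.get_scenario_lane_segments()
print(f"Loaded {len(lane_segments)} lane segments.")
```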

@@ -55,8 +57,8 @@ Please refer to the [`LaneSegment`](lane_segment.py#L71) class, with the followi
 - `id`: unique identifier for this lane segment (guaranteed to be unique only within this local map).
 - `is_intersection`: boolean value representing whether or not this lane segment lies within an intersection.
 - `lane_type`: designation of which vehicle types may legally utilize this lane for travel (see [`LaneType`](lane_segment.py#L23)).
-- `right_lane_boundary`: 3D polyline representing the right lane boundary (see [`Polyline`](map_primitives.py#L37)).
-- `left_lane_boundary`: 3D polyline representing the left lane boundary.
+- `right_lane_boundary`: 3d polyline representing the right lane boundary (see [`Polyline`](map_primitives.py#L37)).
+- `left_lane_boundary`: 3d polyline representing the left lane boundary.
 - `right_mark_type`: type of painted marking found along the right lane boundary (see [`LaneMarkType`](lane_segment.py#L31)).
 - `left_mark_type`: type of painted marking found along the left lane boundary.
 - `predecessors`: unique identifiers of lane segments that are predecessors of this object.
@@ -64,10 +66,10 @@ Please refer to the [`LaneSegment`](lane_segment.py#L71) class, with the followi
 - `right_neighbor_id`: unique identifier of the lane segment representing this object's right neighbor.
 - `left_neighbor_id`: unique identifier of the lane segment representing this object's left neighbor.
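As a brief illustration of how these `LaneSegment` attributes are typically consumed, the sketch below walks the lane graph. It assumes an already-loaded `ArgoverseStaticMap` named `avm` (see the loading sketch earlier) and that `Polyline` exposes its vertices as an `xyz` array, as in `map_primitives.py`; treat those property names as assumptions to verify.

```python
# Assumes `avm` is a loaded ArgoverseStaticMap (see the loading sketch above).
for lane_segment in avm.get_scenario_lane_segments():
    # Boundary polylines are 3D; `xyz` is assumed to be an (N, 3) array of waypoints.
    right_xyz = lane_segment.right_lane_boundary.xyz
    left_xyz = lane_segment.left_lane_boundary.xyz

    # Successor ids let you hop through the lane graph.
    for successor_id in lane_segment.successors:
        print(f"{lane_segment.id} -> {successor_id} (intersection={lane_segment.is_intersection})")
```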

@@ -78,7 +80,7 @@ Instead of providing drivable area segmentation in a rasterized format, as we di
 Please refer to the [`DrivableArea`](drivable_area.py#L17) class, with the following attributes:
 - `id`: unique identifier.
-- `area_boundary`: 3D vertices of polygon, representing the drivable area's boundary.
+- `area_boundary`: 3d vertices of polygon, representing the drivable area's boundary.
@@ -89,18 +91,18 @@ These entities represent crosswalks, and are provided in vector format. They are
 Please refer to the [`PedestrianCrossing`](pedestrian_crossing.py#L17) class, with the following attributes:
 - `id`: unique identifier of pedestrian crossing.
-- `edge1`: 3D polyline representing one edge of the crosswalk, with 2 waypoints.
-- `edge2`: 3D polyline representing the other edge of the crosswalk, with 2 waypoints.
+- `edge1`: 3d polyline representing one edge of the crosswalk, with 2 waypoints.
+- `edge2`: 3d polyline representing the other edge of the crosswalk, with 2 waypoints.
 ## Area of Local Maps
-Each scenario’s local map includes all entities found within a $100 \text{ m}$ dilation in $\ell_2$-norm from the ego-vehicle trajectory.
+Each scenario’s local map includes all entities found within a 100 m dilation in l2-norm from the ego-vehicle trajectory.
 ## Raster Maps: Ground Surface Height
-Only the AV2 Sensor Dataset and TbV includes a dense ground surface height map. (The AV2 LiDAR dataset and AV2 Motion Forecasting (MF) datasets **do not** come up with raster maps, but still have sparse 3D height information on polylines).
+Only the AV 2.0 Sensor Dataset and TbV include a dense ground surface height map. (The AV 2.0 LiDAR dataset and AV 2.0 Motion Forecasting (MF) datasets **do not** come with raster maps, but still have sparse 3D height information on polylines.)
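A short, hedged sketch of accessing the drivable areas and pedestrian crossings described in this hunk follows. It again assumes a loaded `ArgoverseStaticMap` named `avm`; the accessor names (`get_scenario_vector_drivable_areas`, `get_scenario_ped_crossings`) follow the map API at the time of writing and should be checked against the tutorial notebook.

```python
# Assumes `avm` is a loaded ArgoverseStaticMap.
for drivable_area in avm.get_scenario_vector_drivable_areas():
    # `area_boundary` holds the 3D vertices outlining the drivable area polygon
    # (assumed here to be a sequence of points).
    print(f"Drivable area {drivable_area.id}: {len(drivable_area.area_boundary)} boundary vertices")

for ped_crossing in avm.get_scenario_ped_crossings():
    # Each crosswalk is defined by two 3D edges, each with 2 waypoints.
    edge1, edge2 = ped_crossing.edge1, ped_crossing.edge2
    print(f"Crosswalk {ped_crossing.id}")
```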

@@ -132,9 +134,9 @@ points_z = avm.raster_ground_height_layer.get_ground_height_at_xy(points_xy)
 ## Training Online Map Inference Models
-Argoverse 2 offers new opportunities for training online map inference models, as the largest source of paired sensor data and HD maps publicly available at the time of release.
+Argoverse 2.0 offers new opportunities for training online map inference models, as the largest source of paired sensor data and HD maps publicly available at the time of release.
-However, a few Sensor Dataset logs intentionally feature HD map changes:
+However, a few Sensor 2.0 Dataset logs intentionally feature HD map changes:
 1. `75e8adad-50a6-3245-8726-5e612db3d165`
 2. `54bc6dbc-ebfb-3fba-b5b3-57f88b4b79ca`
diff --git a/tutorials/3d_object_detection.py b/tutorials/detection_data_loader.py
similarity index 100%
rename from tutorials/3d_object_detection.py
rename to tutorials/detection_data_loader.py
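If you train online map inference models on the Sensor Dataset, you will likely want to exclude the intentionally changed logs called out above. A simple, illustrative filter is sketched below; the set only repeats the two log ids visible in this excerpt, so extend it with the full list from the map README.

```python
from typing import List

# Log ids with intentional HD map changes. Only the two ids listed above are shown;
# the complete list lives in the map README.
MAP_CHANGE_LOG_IDS = {
    "75e8adad-50a6-3245-8726-5e612db3d165",
    "54bc6dbc-ebfb-3fba-b5b3-57f88b4b79ca",
}


def filter_map_change_logs(log_ids: List[str]) -> List[str]:
    """Drop Sensor Dataset logs whose HD maps were intentionally modified."""
    return [log_id for log_id in log_ids if log_id not in MAP_CHANGE_LOG_IDS]
```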