Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Change line length. #176

Merged
merged 1 commit into from
May 1, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -61,11 +61,7 @@ repository = "https://github.com/argoverse/av2-api"
features = ["pyo3/extension-module"]
module-name = "av2._r"

[tool.black]
line-length = 120

[tool.ruff]
line-length = 120
select = [
"D",
]
Expand Down
47 changes: 36 additions & 11 deletions src/av2/datasets/motion_forecasting/eval/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,9 @@
from av2.utils.typing import NDArrayBool, NDArrayFloat


def compute_ade(forecasted_trajectories: NDArrayFloat, gt_trajectory: NDArrayFloat) -> NDArrayFloat:
def compute_ade(
forecasted_trajectories: NDArrayFloat, gt_trajectory: NDArrayFloat
) -> NDArrayFloat:
"""Compute the average displacement error for a set of K predicted trajectories (for the same actor).

Args:
Expand All @@ -18,12 +20,16 @@ def compute_ade(forecasted_trajectories: NDArrayFloat, gt_trajectory: NDArrayFlo
Returns:
(K,) Average displacement error for each of the predicted trajectories.
"""
displacement_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2)
displacement_errors = np.linalg.norm(
forecasted_trajectories - gt_trajectory, axis=2
)
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade


def compute_fde(forecasted_trajectories: NDArrayFloat, gt_trajectory: NDArrayFloat) -> NDArrayFloat:
def compute_fde(
forecasted_trajectories: NDArrayFloat, gt_trajectory: NDArrayFloat
) -> NDArrayFloat:
"""Compute the final displacement error for a set of K predicted trajectories (for the same actor).

Args:
Expand Down Expand Up @@ -78,7 +84,9 @@ def compute_brier_ade(
(K,) Probability-weighted average displacement error for each predicted trajectory.
"""
# Compute ADE with Brier score component
brier_score = _compute_brier_score(forecasted_trajectories, forecast_probabilities, normalize)
brier_score = _compute_brier_score(
forecasted_trajectories, forecast_probabilities, normalize
)
ade_vector = compute_ade(forecasted_trajectories, gt_trajectory)
brier_ade: NDArrayFloat = ade_vector + brier_score
return brier_ade
Expand All @@ -102,7 +110,9 @@ def compute_brier_fde(
(K,) Probability-weighted final displacement error for each predicted trajectory.
"""
# Compute FDE with Brier score component
brier_score = _compute_brier_score(forecasted_trajectories, forecast_probabilities, normalize)
brier_score = _compute_brier_score(
forecasted_trajectories, forecast_probabilities, normalize
)
fde_vector = compute_fde(forecasted_trajectories, gt_trajectory)
brier_fde: NDArrayFloat = fde_vector + brier_score
return brier_fde
Expand Down Expand Up @@ -136,7 +146,9 @@ def _compute_brier_score(

# Validate that all forecast probabilities are in the range [0, 1]
if np.logical_or(forecast_probabilities < 0.0, forecast_probabilities > 1.0).any():
raise ValueError("At least one forecast probability falls outside the range [0, 1].")
raise ValueError(
"At least one forecast probability falls outside the range [0, 1]."
)

# If enabled, normalize forecast probabilities to sum to 1
if normalize:
Expand All @@ -146,7 +158,9 @@ def _compute_brier_score(
return brier_score


def compute_world_fde(forecasted_world_trajectories: NDArrayFloat, gt_world_trajectories: NDArrayFloat) -> NDArrayFloat:
def compute_world_fde(
forecasted_world_trajectories: NDArrayFloat, gt_world_trajectories: NDArrayFloat
) -> NDArrayFloat:
"""Compute the mean final displacement error for each of K predicted worlds.

Args:
Expand All @@ -167,7 +181,9 @@ def compute_world_fde(forecasted_world_trajectories: NDArrayFloat, gt_world_traj
return world_fdes


def compute_world_ade(forecasted_world_trajectories: NDArrayFloat, gt_world_trajectories: NDArrayFloat) -> NDArrayFloat:
def compute_world_ade(
forecasted_world_trajectories: NDArrayFloat, gt_world_trajectories: NDArrayFloat
) -> NDArrayFloat:
"""Compute the mean average displacement error for each of K predicted worlds.

Args:
Expand All @@ -189,7 +205,9 @@ def compute_world_ade(forecasted_world_trajectories: NDArrayFloat, gt_world_traj


def compute_world_misses(
forecasted_world_trajectories: NDArrayFloat, gt_world_trajectories: NDArrayFloat, miss_threshold_m: float = 2.0
forecasted_world_trajectories: NDArrayFloat,
gt_world_trajectories: NDArrayFloat,
miss_threshold_m: float = 2.0,
) -> NDArrayBool:
"""For each world, compute whether predictions for each actor misssed by more than a distance threshold.

Expand Down Expand Up @@ -230,7 +248,12 @@ def compute_world_brier_fde(
(K,) Mean probability-weighted final displacement error for each of the predicted worlds.
"""
actor_brier_fdes = [
compute_brier_fde(forecasted_actor_trajectories, gt_actor_trajectory, forecasted_world_probabilities, normalize)
compute_brier_fde(
forecasted_actor_trajectories,
gt_actor_trajectory,
forecasted_world_probabilities,
normalize,
)
for forecasted_actor_trajectories, gt_actor_trajectory in zip(
forecasted_world_trajectories, gt_world_trajectories
)
Expand All @@ -256,7 +279,9 @@ def compute_world_collisions(
for actor_idx in range(len(forecasted_world_trajectories)):
# Compute distance from current actor to all other predicted actors at each timestep
forecasted_actor_trajectories = forecasted_world_trajectories[actor_idx]
scenario_actor_dists = np.linalg.norm(forecasted_world_trajectories - forecasted_actor_trajectories, axis=-1)
scenario_actor_dists = np.linalg.norm(
forecasted_world_trajectories - forecasted_actor_trajectories, axis=-1
)

# For each world, find the closest distance to any other predicted actor, at any time
scenario_actor_dists[actor_idx, :, :] = np.inf
Expand Down
31 changes: 24 additions & 7 deletions src/av2/datasets/motion_forecasting/eval/submission.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,10 @@ def __post_init__(self) -> None:
ValueError: If for any track, number of probabilities doesn't match the number of predicted trajectories.
ValueError: If prediction probabilities for at least one scenario do not sum to 1.
"""
for scenario_id, (scenario_probabilities, scenario_trajectories) in self.predictions.items():
for scenario_id, (
scenario_probabilities,
scenario_trajectories,
) in self.predictions.items():
for track_id, track_trajectories in scenario_trajectories.items():
# Validate that predicted trajectories are of the correct shape
if track_trajectories[0].shape[-2:] != EXPECTED_PREDICTION_SHAPE:
Expand Down Expand Up @@ -94,13 +97,24 @@ def from_parquet(cls, submission_file_path: Path) -> ChallengeSubmission:
for scenario_id, scenario_df in submission_df.groupby("scenario_id"):
scenario_trajectories: ScenarioTrajectories = {}
for track_id, track_df in scenario_df.groupby("track_id"):
predicted_trajectories_x = np.stack(track_df.loc[:, "predicted_trajectory_x"].values.tolist())
predicted_trajectories_y = np.stack(track_df.loc[:, "predicted_trajectory_y"].values.tolist())
predicted_trajectories = np.stack((predicted_trajectories_x, predicted_trajectories_y), axis=-1)
predicted_trajectories_x = np.stack(
track_df.loc[:, "predicted_trajectory_x"].values.tolist()
)
predicted_trajectories_y = np.stack(
track_df.loc[:, "predicted_trajectory_y"].values.tolist()
)
predicted_trajectories = np.stack(
(predicted_trajectories_x, predicted_trajectories_y), axis=-1
)
scenario_trajectories[track_id] = predicted_trajectories

scenario_probabilities = np.array(track_df.loc[:, "probability"].values.tolist())
submission_dict[scenario_id] = (scenario_probabilities, scenario_trajectories)
scenario_probabilities = np.array(
track_df.loc[:, "probability"].values.tolist()
)
submission_dict[scenario_id] = (
scenario_probabilities,
scenario_trajectories,
)

return cls(predictions=submission_dict)

Expand All @@ -113,7 +127,10 @@ def to_parquet(self, submission_file_path: Path) -> None:
prediction_rows: List[PredictionRow] = []

# Build list of rows for the submission dataframe
for scenario_id, (scenario_probabilities, scenario_trajectories) in self.predictions.items():
for scenario_id, (
scenario_probabilities,
scenario_trajectories,
) in self.predictions.items():
for track_id, track_trajectories in scenario_trajectories.items():
for world_idx in range(len(track_trajectories)):
prediction_rows.append(
Expand Down
31 changes: 24 additions & 7 deletions src/av2/datasets/motion_forecasting/scenario_serialization.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,18 @@
import numpy as np
import pandas as pd

from av2.datasets.motion_forecasting.data_schema import ArgoverseScenario, ObjectState, ObjectType, Track, TrackCategory


def serialize_argoverse_scenario_parquet(save_path: Path, scenario: ArgoverseScenario) -> None:
from av2.datasets.motion_forecasting.data_schema import (
ArgoverseScenario,
ObjectState,
ObjectType,
Track,
TrackCategory,
)


def serialize_argoverse_scenario_parquet(
save_path: Path, scenario: ArgoverseScenario
) -> None:
"""Serialize a single Argoverse scenario in parquet format and save to disk.

Args:
Expand Down Expand Up @@ -64,7 +72,9 @@ def load_argoverse_scenario_parquet(scenario_path: Path) -> ArgoverseScenario:

# Interpolate scenario timestamps based on the saved start and end timestamps
timestamps_ns = np.linspace(
tracks_df["start_timestamp"][0], tracks_df["end_timestamp"][0], num=tracks_df["num_timestamps"][0]
tracks_df["start_timestamp"][0],
tracks_df["end_timestamp"][0],
num=tracks_df["num_timestamps"][0],
)

return ArgoverseScenario(
Expand Down Expand Up @@ -139,7 +149,9 @@ def _load_tracks_from_tabular_format(tracks_df: pd.DataFrame) -> List[Track]:
for track_id, track_df in tracks_df.groupby("track_id"):
observed_states: List[bool] = track_df.loc[:, "observed"].values.tolist()
object_type: ObjectType = ObjectType(track_df["object_type"].iloc[0])
object_category: TrackCategory = TrackCategory(track_df["object_category"].iloc[0])
object_category: TrackCategory = TrackCategory(
track_df["object_category"].iloc[0]
)
timesteps: List[int] = track_df.loc[:, "timestep"].values.tolist()
positions: List[Tuple[float, float]] = list(
zip(
Expand Down Expand Up @@ -168,7 +180,12 @@ def _load_tracks_from_tabular_format(tracks_df: pd.DataFrame) -> List[Track]:
)

tracks.append(
Track(track_id=track_id, object_states=object_states, object_type=object_type, category=object_category)
Track(
track_id=track_id,
object_states=object_states,
object_type=object_type,
category=object_category,
)
)

return tracks
Loading