Synthetic Semantic Segmentation Benchmark (#820)
czaloom authored Nov 13, 2024
1 parent 582eb93 commit 300886e
Showing 11 changed files with 1,077 additions and 8 deletions.
1 change: 0 additions & 1 deletion .github/workflows/lite-benchmark-evaluations.yml
@@ -35,4 +35,3 @@ jobs:
export BENCHMARK_RESULTS=$(python -c "import os;import json;print(json.dumps(json.load(open('objdet_results.json', 'r')), indent=4));")
echo "$BENCHMARK_RESULTS"
working-directory: ./lite/benchmarks/
- run: make stop-env
24 changes: 24 additions & 0 deletions .github/workflows/lite-synthetic-benchmarks.yml
@@ -0,0 +1,24 @@
name: "[valor-lite] synthetic benchmarks"

on:
push:
branches: "**"

permissions:
id-token: write
contents: read

jobs:
run-benchmarks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: install lite
run: pip install -e .
working-directory: ./lite
- name: benchmark semantic segmentation
run: python benchmark_semantic_segmentation.py
working-directory: ./lite/benchmarks/synthetic/
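The same two steps reproduce this job locally: install the package with pip install -e . from the ./lite directory, then run python benchmark_semantic_segmentation.py from ./lite/benchmarks/synthetic/.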
94 changes: 94 additions & 0 deletions lite/benchmarks/synthetic/benchmark_semantic_segmentation.py
@@ -0,0 +1,94 @@
from valor_lite.profiling import Benchmark, BenchmarkError
from valor_lite.semantic_segmentation.benchmark import (
benchmark_add_data,
benchmark_evaluate,
benchmark_finalize,
)


def benchmark(
bitmask_shape: tuple[int, int],
number_of_unique_labels: int,
number_of_images: int,
*_,
memory_limit: float = 4.0,
time_limit: float = 10.0,
repeat: int = 1,
verbose: bool = False,
):
"""
    Runs a single benchmark.

    Parameters
----------
bitmask_shape : tuple[int, int]
The size (h, w) of the bitmask array.
number_of_unique_labels : int
The number of unique labels used in the synthetic example.
number_of_images : int
The number of distinct datums that are created.
    memory_limit : float, default=4.0
        The maximum amount of system memory allowed in gigabytes (GB).
    time_limit : float, default=10.0
        The maximum amount of time permitted before the benchmark is killed.
    repeat : int, default=1
        The number of times to run a benchmark to produce an average runtime.
verbose : bool, default=False
Toggles terminal output of benchmark results.
"""

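    # The Benchmark helper enforces the limits below; memory_limit is
    # converted from gigabytes to bytes before being passed along.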
b = Benchmark(
time_limit=time_limit,
memory_limit=int(memory_limit * (1024**3)),
repeat=repeat,
verbose=verbose,
)

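    # Each call to b.run returns a (results, failed, details) tuple; the
    # first recorded failure aborts the script by raising BenchmarkError.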
_, failed, details = b.run(
benchmark=benchmark_add_data,
n_labels=[number_of_unique_labels],
shape=[bitmask_shape],
)
if failed:
raise BenchmarkError(
benchmark=details["benchmark"],
error_type=failed[0]["error"],
error_message=failed[0]["msg"],
)

_, failed, details = b.run(
benchmark=benchmark_finalize,
n_datums=[number_of_images],
n_labels=[number_of_unique_labels],
)
if failed:
raise BenchmarkError(
benchmark=details["benchmark"],
error_type=failed[0]["error"],
error_message=failed[0]["msg"],
)

_, failed, details = b.run(
benchmark=benchmark_evaluate,
n_datums=[number_of_images],
n_labels=[number_of_unique_labels],
)
if failed:
raise BenchmarkError(
benchmark=details["benchmark"],
error_type=failed[0]["error"],
error_message=failed[0]["msg"],
)


if __name__ == "__main__":

benchmark(
bitmask_shape=(4000, 4000),
number_of_images=1000,
number_of_unique_labels=10,
memory_limit=4.0,
time_limit=10.0,
repeat=1,
verbose=True,
)
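
The entry point above exercises one fixed configuration. A minimal sketch of a parameter sweep built on the same function (the shapes and label counts below are illustrative, not part of this commit):

from valor_lite.profiling import BenchmarkError

configurations = [
    ((1000, 1000), 5),
    ((4000, 4000), 10),
]
for shape, n_labels in configurations:
    try:
        benchmark(
            bitmask_shape=shape,
            number_of_images=1000,
            number_of_unique_labels=n_labels,
            memory_limit=4.0,
            time_limit=10.0,
            repeat=1,
            verbose=True,
        )
    except BenchmarkError as e:
        # Report the failing configuration and continue the sweep.
        print(f"{shape} with {n_labels} labels failed: {e}")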