From 2cf9b29ee62f585608d520a6bcb3b7ca89d3cae3 Mon Sep 17 00:00:00 2001
From: Barbara Korycki
Date: Mon, 16 Dec 2024 15:27:35 -1000
Subject: [PATCH] Remove anonymize bug

---
 src/modelbench/run.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/modelbench/run.py b/src/modelbench/run.py
index 46ce8da6..7efd36bb 100644
--- a/src/modelbench/run.py
+++ b/src/modelbench/run.py
@@ -29,6 +29,7 @@
 from modelgauge.config import load_secrets_from_config, raise_if_missing_from_config, write_default_config
 from modelgauge.load_plugins import load_plugins
 from modelgauge.sut import SUT
+from modelgauge.sut_decorator import modelgauge_sut
 from modelgauge.sut_registry import SUTS
 from modelgauge.tests.safe_v1 import PROMPT_SETS, Locale
 
@@ -267,6 +268,11 @@ def run_benchmarks_for_suts(benchmarks, suts, max_instances, debug=False, json_l
     return run
 
 
+@modelgauge_sut(capabilities=[])
+class AnonSUT(SUT):
+    pass
+
+
 def print_summary(benchmark, benchmark_scores, anonymize):
     if anonymize:
         rng = random.Random(anonymize)
@@ -276,7 +282,7 @@ def print_summary(benchmark, benchmark_scores, anonymize):
         for bs in benchmark_scores:
             counter += 1
             uid = f"sut{counter:02d}"
-            bs.sut = SUT(uid)
+            bs.sut = AnonSUT(uid)
 
     echo(termcolor.colored(f"\nBenchmarking complete for {benchmark.uid}.", "green"))
     console = Console()
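
For context on how the anonymization path uses the new placeholder, the sketch below mirrors the patched print_summary() loop as a standalone helper. The helper name anonymize_scores and the direct import of AnonSUT from modelbench.run are illustrative assumptions, not part of the patch; it also assumes each benchmark score object exposes a `.sut` attribute, as in the diff above.

    # Minimal sketch, assuming modelbench with this patch applied is installed.
    import random

    from modelbench.run import AnonSUT  # decorated placeholder SUT added by this patch


    def anonymize_scores(benchmark_scores, seed):
        """Shuffle scores and swap each real SUT for an anonymous placeholder."""
        rng = random.Random(seed)
        rng.shuffle(benchmark_scores)
        for counter, bs in enumerate(benchmark_scores, start=1):
            # AnonSUT(uid) stands in where a bare SUT(uid) triggered the anonymize
            # bug; AnonSUT is decorated with @modelgauge_sut, which downstream
            # modelgauge code appears to expect of SUT instances.
            bs.sut = AnonSUT(f"sut{counter:02d}")
        return benchmark_scores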