Add compare subcommand (#623)
* Move for better visibility

* Add compare subparser

* Add subcommand compare

* Fix test

* Add ticket

* Add --files option and minor fix

* Fix tests

* Add unit tests

* Address feedback

* Fix minor error and add section header
nv-hwoo committed May 8, 2024
1 parent ef114b2 commit bc1bb0c
Showing 4 changed files with 143 additions and 19 deletions.
src/c++/perf_analyzer/genai-perf/genai_perf/main.py: 18 changes (11 additions, 7 deletions)
@@ -135,15 +135,19 @@ def finalize(profile_export_file: Path):
 # to assert correct errors and messages.
 def run():
     try:
+        # TMA-1900: refactor CLI handler
         init_logging()
         args, extra_args = parser.parse_args()
-        create_artifacts_dirs(args.generate_plots)
-        tokenizer = get_tokenizer(args.tokenizer)
-        generate_inputs(args, tokenizer)
-        args.func(args, extra_args)
-        data_parser = calculate_metrics(args, tokenizer)
-        report_output(data_parser, args)
-        finalize(args.profile_export_file)
+        if args.subcommand == "compare":
+            args.func(args)
+        else:
+            create_artifacts_dirs(args.generate_plots)
+            tokenizer = get_tokenizer(args.tokenizer)
+            generate_inputs(args, tokenizer)
+            args.func(args, extra_args)
+            data_parser = calculate_metrics(args, tokenizer)
+            report_output(data_parser, args)
+            finalize(args.profile_export_file)
     except Exception as e:
         raise GenAIPerfException(e)
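To make the new dispatch concrete, here is a minimal sketch of driving the two run() paths through the parser module. It assumes genai-perf is installed and that the default profile options validate; the model and file names are hypothetical.

import sys

from genai_perf import parser

# Compare path: args.subcommand == "compare", so run() only calls args.func(args).
sys.argv = ["genai-perf", "compare", "--files", "run1.json", "run2.json"]
args, extra_args = parser.parse_args()
assert args.subcommand == "compare"
args.func(args)  # compare_handler, currently a stub pending TMA-1893

# Profile path: no subcommand, so run() takes the else branch
# (artifacts, tokenizer, input generation, Perf Analyzer, metrics, report).
sys.argv = ["genai-perf", "-m", "my_model"]
args, extra_args = parser.parse_args()
assert args.subcommand is None  # args.func is profile_handler here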
src/c++/perf_analyzer/genai-perf/genai_perf/parser.py: 85 changes (74 additions, 11 deletions)
@@ -41,6 +41,29 @@
 _endpoint_type_map = {"chat": "v1/chat/completions", "completions": "v1/completions"}


+def _check_model_args(
+    parser: argparse.ArgumentParser, args: argparse.Namespace
+) -> argparse.Namespace:
+    """
+    Check if model name is provided.
+    """
+    if not args.subcommand and not args.model:
+        parser.error("The -m/--model option is required and cannot be empty.")
+    return args
+
+
+def _check_compare_args(
+    parser: argparse.ArgumentParser, args: argparse.Namespace
+) -> argparse.Namespace:
+    """
+    Check compare subcommand args
+    """
+    if args.subcommand == "compare":
+        if not args.config and not args.files:
+            parser.error("Either the --config or --files option must be specified.")
+    return args
+
+
 def _check_conditional_args(
     parser: argparse.ArgumentParser, args: argparse.Namespace
 ) -> argparse.Namespace:
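Both new checks report problems through argparse's parser.error, which prints the message to stderr and exits with a non-zero status; the CLI tests further down assert exactly that behavior. A small standalone sketch of the same pattern, using an illustrative prog name and option rather than genai-perf's own parser:

import argparse

demo = argparse.ArgumentParser(prog="demo")
demo.add_argument("-m", "--model", default=None)
args = demo.parse_args(["-m", ""])  # an empty model name gets past argparse itself

# demo.error() prints the usage line plus "demo: error: <message>" to stderr and
# raises SystemExit(2), which is the behavior the tests assert on.
if not args.model:
    demo.error("The -m/--model option is required and cannot be empty.")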
@@ -132,15 +155,6 @@ def _convert_str_to_enum_entry(args, option, enum):
     return args


-### Handlers ###
-
-
-def handler(args, extra_args):
-    from genai_perf.wrapper import Profiler
-
-    Profiler.run(args=args, extra_args=extra_args)
-
-
 ### Parsers ###

@@ -286,7 +300,7 @@ def _add_endpoint_args(parser):
         "-m",
         "--model",
         type=str,
-        required=True,
+        default=None,
         help=f"The name of the model to benchmark.",
     )

@@ -437,6 +451,47 @@ def get_extra_inputs_as_dict(args: argparse.Namespace) -> dict:
     return request_inputs


+def _parse_compare_args(subparsers) -> argparse.ArgumentParser:
+    compare = subparsers.add_parser(
+        "compare",
+        description="Subcommand to generate plots that compare multiple profile runs.",
+    )
+    compare_group = compare.add_argument_group("Compare")
+    mx_group = compare_group.add_mutually_exclusive_group(required=False)
+    mx_group.add_argument(
+        "--config",
+        type=Path,
+        default=None,
+        help="The path to the YAML file that specifies plot configurations for "
+        "comparing multiple runs.",
+    )
+    mx_group.add_argument(
+        "-f",
+        "--files",
+        nargs="+",
+        default=[],
+        help="List of paths to the profile export JSON files. Users can specify "
+        "this option instead of the `--config` option if they would like "
+        "GenAI-Perf to generate default plots as well as initial YAML config file.",
+    )
+    compare.set_defaults(func=compare_handler)
+    return compare
+
+
+### Handlers ###
+
+
+def profile_handler(args, extra_args):
+    from genai_perf.wrapper import Profiler
+
+    Profiler.run(args=args, extra_args=extra_args)
+
+
+def compare_handler(args: argparse.Namespace):
+    # TMA-1893: parse yaml file
+    pass
+
+
 ### Entrypoint ###

@@ -448,7 +503,7 @@ def parse_args():
         description="CLI to profile LLMs and Generative AI models with Perf Analyzer",
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
-    parser.set_defaults(func=handler)
+    parser.set_defaults(func=profile_handler)

     # Conceptually group args for easier visualization
     _add_endpoint_args(parser)
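The handler wiring is the standard argparse dispatch pattern: the top-level parser defaults func to profile_handler, and the compare subparser overrides it with compare_handler, so args.func always points at the right handler after parsing. A self-contained toy version of that pattern, using illustrative handlers rather than genai-perf code:

import argparse


def profile(args):
    print("profiling", args.model)


def compare(args):
    print("comparing", args.files)


demo = argparse.ArgumentParser(prog="demo")
demo.add_argument("-m", "--model", default=None)
demo.set_defaults(func=profile)  # default handler when no subcommand is given

subparsers = demo.add_subparsers(dest="subcommand")
cmp_parser = subparsers.add_parser("compare")
cmp_parser.add_argument("-f", "--files", nargs="+", default=[])
cmp_parser.set_defaults(func=compare)  # the subparser overrides the default

args = demo.parse_args(["compare", "--files", "a.json", "b.json"])
args.func(args)  # -> comparing ['a.json', 'b.json']

args = demo.parse_args(["-m", "gpt2"])
args.func(args)  # -> profiling gpt2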
@@ -457,6 +512,12 @@
     _add_output_args(parser)
     _add_other_args(parser)

+    # Add subcommands
+    subparsers = parser.add_subparsers(
+        help="List of subparser commands.", dest="subcommand"
+    )
+    compare_parser = _parse_compare_args(subparsers)
+
     # Check for passthrough args
     if "--" in argv:
         passthrough_index = argv.index("--")
@@ -466,7 +527,9 @@

     args = parser.parse_args(argv[1:passthrough_index])
     args = _infer_prompt_source(args)
+    args = _check_model_args(parser, args)
     args = _check_conditional_args(parser, args)
+    args = _check_compare_args(compare_parser, args)
     args = _update_load_manager_args(args)

     return args, argv[passthrough_index + 1 :]
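parse_args also splits argv at a literal "--": everything before it is parsed by genai-perf, and everything after it is returned as extra_args and later handed to Perf Analyzer by Profiler.run. A small sketch of that split; the fallback when no "--" is present and the passthrough flag name are assumptions, not taken from this diff.

argv = ["genai-perf", "-m", "my_model", "--", "--hypothetical-pa-flag", "42"]

# Assumed fallback: without a "--", the entire command line is parsed by genai-perf.
passthrough_index = argv.index("--") if "--" in argv else len(argv)

print(argv[1:passthrough_index])      # parsed by genai-perf: ['-m', 'my_model']
print(argv[passthrough_index + 1 :])  # forwarded verbatim: ['--hypothetical-pa-flag', '42']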
src/c++/perf_analyzer/genai-perf/genai_perf/wrapper.py: 1 change (1 addition, 0 deletions)
@@ -76,6 +76,7 @@ def build_cmd(args: Namespace, extra_args: list[str] | None = None) -> list[str]
             "tokenizer",
             "endpoint_type",
             "generate_plots",
+            "subcommand",
         ]

         utils.remove_file(args.profile_export_file)
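The new "subcommand" entry joins the namespace attributes that build_cmd skips when it translates the parsed args into a Perf Analyzer command line, since Perf Analyzer has no such option. A rough, hypothetical sketch of that kind of filtering, not the actual wrapper implementation:

from argparse import Namespace


def to_cli_flags(args: Namespace, skip_args: list[str]) -> list[str]:
    # Convert namespace attributes into --flag value pairs, skipping
    # genai-perf-only options that Perf Analyzer would not understand.
    cmd: list[str] = []
    for key, value in vars(args).items():
        if key in skip_args or value is None:
            continue
        cmd += [f"--{key.replace('_', '-')}", str(value)]
    return cmd


print(to_cli_flags(Namespace(model="gpt2", subcommand="compare"), ["subcommand"]))
# ['--model', 'gpt2']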
src/c++/perf_analyzer/genai-perf/tests/test_cli.py: 58 changes (57 additions, 1 deletion)
@@ -32,6 +32,9 @@


 class TestCLIArguments:
+    # ================================================
+    # GENAI-PERF COMMAND
+    # ================================================
     expected_help_output = (
         "CLI to profile LLMs and Generative AI models with Perf Analyzer"
     )
@@ -217,7 +220,7 @@ def test_load_level_mutually_exclusive(self, monkeypatch, capsys):

     def test_model_not_provided(self, monkeypatch, capsys):
         monkeypatch.setattr("sys.argv", ["genai-perf"])
-        expected_output = "the following arguments are required: -m/--model"
+        expected_output = "The -m/--model option is required and cannot be empty."

         with pytest.raises(SystemExit) as excinfo:
             parser.parse_args()
@@ -437,3 +440,56 @@ def test_prompt_source_assertions(self, monkeypatch, mocker, capsys):
         assert excinfo.value.code != 0
         captured = capsys.readouterr()
         assert expected_output in captured.err
+
+    # ================================================
+    # COMPARE SUBCOMMAND
+    # ================================================
+    expected_compare_help_output = (
+        "Subcommand to generate plots that compare multiple profile runs."
+    )
+
+    @pytest.mark.parametrize(
+        "args, expected_output",
+        [
+            (["-h"], expected_compare_help_output),
+            (["--help"], expected_compare_help_output),
+        ],
+    )
+    def test_compare_help_arguments_output_and_exit(
+        self, monkeypatch, args, expected_output, capsys
+    ):
+        monkeypatch.setattr("sys.argv", ["genai-perf", "compare"] + args)
+
+        with pytest.raises(SystemExit) as excinfo:
+            _ = parser.parse_args()
+
+        # Check that the exit was successful
+        assert excinfo.value.code == 0
+
+        # Capture that the correct message was displayed
+        captured = capsys.readouterr()
+        assert expected_output in captured.out
+
+    def test_compare_mutually_exclusive(self, monkeypatch, capsys):
+        args = ["genai-perf", "compare", "--config", "hello", "--files", "a", "b", "c"]
+        monkeypatch.setattr("sys.argv", args)
+        expected_output = "argument -f/--files: not allowed with argument --config"
+
+        with pytest.raises(SystemExit) as excinfo:
+            parser.parse_args()
+
+        assert excinfo.value.code != 0
+        captured = capsys.readouterr()
+        assert expected_output in captured.err
+
+    def test_compare_not_provided(self, monkeypatch, capsys):
+        args = ["genai-perf", "compare"]
+        monkeypatch.setattr("sys.argv", args)
+        expected_output = "Either the --config or --files option must be specified."
+
+        with pytest.raises(SystemExit) as excinfo:
+            parser.parse_args()
+
+        assert excinfo.value.code != 0
+        captured = capsys.readouterr()
+        assert expected_output in captured.err
