# Remove unused param from Engine (microsoft#845)
## Describe your changes

Remove unused param from Engine
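
For reviewers, a hedged before/after of the constructor call, distilled from the test updates in this diff (`options`, `host_system`, and `target_system` are illustrative names, not values from the source):

```python
# Before: Engine.__init__ accepted execution_providers but never used it.
engine = Engine(
    options,
    host=host_system,
    target=target_system,
    evaluator_config=evaluator_config,
    execution_providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
)

# After: the keyword is gone; execution providers are resolved into accelerator
# specs separately and handed to engine.run() (see the test diffs below).
engine = Engine(options, host=host_system, target=target_system, evaluator_config=evaluator_config)
```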

## Checklist before requesting a review
- [ ] Add unit tests for this change.
- [ ] Make sure all tests can pass.
- [ ] Update documents if necessary.
- [ ] Lint and apply fixes to your code by running `lintrunner -a`
- [ ] Is this a user-facing change? If yes, give a description of this
change to be included in the release notes.

## (Optional) Issue link
xiaoyu-work authored Dec 22, 2023
1 parent 855be5d commit b13a897
Showing 6 changed files with 6 additions and 19 deletions.
4 changes: 0 additions & 4 deletions olive/engine/engine.py
@@ -48,7 +48,6 @@ def __init__(
         host: Optional[OliveSystem] = None,
         target: Optional[OliveSystem] = None,
         evaluator_config: Optional["OliveEvaluatorConfig"] = None,
-        execution_providers: Optional[List[str]] = None,
     ):
         self._config = validate_config(config, EngineConfig)
 
@@ -94,14 +93,11 @@ def __init__(
         # {"pass_name": {"pass": pass, "host": host, "evaluator": evaluator, "clean_run_cache": clean_run_cache}}
         self.passes = OrderedDict()
 
         self.pass_flows = None
         self.pass_flows_search_spaces = None
 
         self.footprints = defaultdict(Footprint)
 
         self.azureml_client_config = self._config.azureml_client_config
 
         self._initialized = False
 
     def initialize(self):

1 change: 1 addition & 0 deletions olive/workflows/run/config.py
@@ -44,6 +44,7 @@ def create_engine(self):
         config = self.dict()
         to_del = [
             "evaluate_input_model",
+            "execution_providers",
             "output_dir",
             "output_name",
             "packaging_config",
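
Since `Engine.__init__` no longer accepts the key, `create_engine` has to strip it from the serialized run config before building the engine. A minimal sketch of that `to_del` pattern; the removal loop, the trailing list entries, and the final constructor call are assumptions, not copied from the source:

```python
def create_engine(self):
    # Serialize the workflow config, then drop keys that are consumed by the
    # run workflow itself rather than by Engine.
    config = self.dict()
    to_del = [
        "evaluate_input_model",
        "execution_providers",  # stripped here now that Engine no longer accepts it
        "output_dir",
        "output_name",
        "packaging_config",
        # (remaining entries elided)
    ]
    for key in to_del:
        config.pop(key, None)  # assumed loop; the real code may use `del config[key]`
    return Engine(config)  # hypothetical call; the real method may pass more arguments
```
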
3 changes: 1 addition & 2 deletions test/multiple_ep/test_aml_system.py
@@ -54,8 +54,7 @@ def test_run_pass_evaluate(self, tmpdir):
 
         metric = get_latency_metric()
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, host=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, host=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
         engine.register(OrtPerfTuning)
         output = engine.run(self.input_model_config, accelerator_specs, output_dir=output_dir)
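
The same substitution repeats in the tests below: the engine itself becomes provider-agnostic, and execution providers reach it through accelerator specs passed to `run()`. A sketch of the new flow; setup of `system`, `execution_providers`, and `input_model_config` is elided, `get_latency_metric`, `create_accelerators`, and `OrtPerfTuning` come from the test utilities and Olive passes (imports omitted), and the two import paths shown are assumptions rather than something this diff confirms:

```python
from olive.engine import Engine  # assumed import path
from olive.evaluator.olive_evaluator import OliveEvaluatorConfig  # assumed import path

# The evaluator is configured exactly as before; only the Engine construction changes.
evaluator_config = OliveEvaluatorConfig(metrics=[get_latency_metric()])
engine = Engine(target=system, host=system, evaluator_config=evaluator_config)

# Execution providers are folded into accelerator specs and handed to run().
accelerator_specs = create_accelerators(system, execution_providers)
engine.register(OrtPerfTuning)
output = engine.run(input_model_config, accelerator_specs, output_dir="outputs")
```
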
3 changes: 1 addition & 2 deletions test/multiple_ep/test_docker_system.py
@@ -40,8 +40,7 @@ def test_run_pass_evaluate(self, tmpdir):
 
         metric = get_latency_metric()
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
         engine.register(OrtPerfTuning)
         output = engine.run(self.input_model_config, accelerator_specs, output_dir=output_dir)

6 changes: 2 additions & 4 deletions test/multiple_ep/test_python_env_system.py
@@ -36,8 +36,7 @@ def test_run_pass_evaluate_windows(self, tmpdir):
 
         metric = get_latency_metric(LatencySubType.AVG)
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, host=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, host=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
 
         engine.register(OrtPerfTuning)
 
@@ -63,8 +62,7 @@ def test_run_pass_evaluate_linux(self, tmpdir):
 
         metric = get_latency_metric(LatencySubType.AVG)
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, host=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, host=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
         engine.register(OrtPerfTuning)
         output = engine.run(

8 changes: 1 addition & 7 deletions test/unit_test/engine/test_engine.py
@@ -621,12 +621,6 @@ def test_docker_system(self, mock_docker_system, mock_local_system, tmpdir):
         metric = get_accuracy_metric(AccuracySubType.ACCURACY_SCORE)
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
 
-        engine = Engine(
-            options,
-            host=mock_local_system,
-            target=mock_docker_system,
-            evaluator_config=evaluator_config,
-            execution_providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
-        )
+        engine = Engine(options, host=mock_local_system, target=mock_docker_system, evaluator_config=evaluator_config)
 
         assert engine.target.system_type == SystemType.Docker
