diff --git a/olive/engine/engine.py b/olive/engine/engine.py
index 4b73e1d3b..bff49bd29 100644
--- a/olive/engine/engine.py
+++ b/olive/engine/engine.py
@@ -48,7 +48,6 @@ def __init__(
         host: Optional[OliveSystem] = None,
         target: Optional[OliveSystem] = None,
         evaluator_config: Optional["OliveEvaluatorConfig"] = None,
-        execution_providers: Optional[List[str]] = None,
     ):
         self._config = validate_config(config, EngineConfig)
@@ -94,14 +93,11 @@ def __init__(
         # {"pass_name": {"pass": pass, "host": host, "evaluator": evaluator, "clean_run_cache": clean_run_cache}}
         self.passes = OrderedDict()
-        self.pass_flows = None
         self.pass_flows_search_spaces = None

         self.footprints = defaultdict(Footprint)

-        self.azureml_client_config = self._config.azureml_client_config
-
         self._initialized = False

     def initialize(self):
diff --git a/olive/workflows/run/config.py b/olive/workflows/run/config.py
index 1ffac5c36..6d2b63ae4 100644
--- a/olive/workflows/run/config.py
+++ b/olive/workflows/run/config.py
@@ -44,6 +44,7 @@ def create_engine(self):
         config = self.dict()
         to_del = [
             "evaluate_input_model",
+            "execution_providers",
             "output_dir",
             "output_name",
             "packaging_config",
diff --git a/test/multiple_ep/test_aml_system.py b/test/multiple_ep/test_aml_system.py
index b113330b0..f83f36be4 100644
--- a/test/multiple_ep/test_aml_system.py
+++ b/test/multiple_ep/test_aml_system.py
@@ -54,8 +54,7 @@ def test_run_pass_evaluate(self, tmpdir):
         metric = get_latency_metric()
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, host=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, host=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
         engine.register(OrtPerfTuning)
         output = engine.run(self.input_model_config, accelerator_specs, output_dir=output_dir)
diff --git a/test/multiple_ep/test_docker_system.py b/test/multiple_ep/test_docker_system.py
index 81bb6abeb..159eb28e1 100644
--- a/test/multiple_ep/test_docker_system.py
+++ b/test/multiple_ep/test_docker_system.py
@@ -40,8 +40,7 @@ def test_run_pass_evaluate(self, tmpdir):
         metric = get_latency_metric()
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
         engine.register(OrtPerfTuning)
         output = engine.run(self.input_model_config, accelerator_specs, output_dir=output_dir)
diff --git a/test/multiple_ep/test_python_env_system.py b/test/multiple_ep/test_python_env_system.py
index 4cf0cc377..5665e392c 100644
--- a/test/multiple_ep/test_python_env_system.py
+++ b/test/multiple_ep/test_python_env_system.py
@@ -36,8 +36,7 @@ def test_run_pass_evaluate_windows(self, tmpdir):
         metric = get_latency_metric(LatencySubType.AVG)
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, host=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, host=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
         engine.register(OrtPerfTuning)
@@ -63,8 +62,7 @@ def test_run_pass_evaluate_linux(self, tmpdir):
         metric = get_latency_metric(LatencySubType.AVG)
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        options = {"execution_providers": self.execution_providers}
-        engine = Engine(options, target=self.system, host=self.system, evaluator_config=evaluator_config)
+        engine = Engine(target=self.system, host=self.system, evaluator_config=evaluator_config)
         accelerator_specs = create_accelerators(self.system, self.execution_providers)
         engine.register(OrtPerfTuning)
         output = engine.run(
diff --git a/test/unit_test/engine/test_engine.py b/test/unit_test/engine/test_engine.py
index 34bbacbba..0d79b9986 100644
--- a/test/unit_test/engine/test_engine.py
+++ b/test/unit_test/engine/test_engine.py
@@ -621,12 +621,6 @@ def test_docker_system(self, mock_docker_system, mock_local_system, tmpdir):
         metric = get_accuracy_metric(AccuracySubType.ACCURACY_SCORE)
         evaluator_config = OliveEvaluatorConfig(metrics=[metric])
-        engine = Engine(
-            options,
-            host=mock_local_system,
-            target=mock_docker_system,
-            evaluator_config=evaluator_config,
-            execution_providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
-        )
+        engine = Engine(options, host=mock_local_system, target=mock_docker_system, evaluator_config=evaluator_config)
         assert engine.target.system_type == SystemType.Docker
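For reviewers, a minimal sketch of the call pattern after this change, pieced together from the updated tests above. Engine no longer accepts execution_providers; they are resolved into accelerator specs via create_accelerators and passed to engine.run instead. The import path and the concrete system/model/output objects below are assumptions for illustration, not part of this diff.

# Usage sketch under the new API (import path assumed; Engine,
# create_accelerators, OliveEvaluatorConfig, get_latency_metric, and
# OrtPerfTuning are the names used by the updated tests above).
from olive.engine import Engine

evaluator_config = OliveEvaluatorConfig(metrics=[get_latency_metric()])

# Before: Engine({"execution_providers": [...]}, target=..., host=..., ...)
# After: the engine is EP-agnostic at construction time.
engine = Engine(target=system, host=system, evaluator_config=evaluator_config)

# Execution providers now travel through accelerator specs...
accelerator_specs = create_accelerators(system, ["CPUExecutionProvider"])

engine.register(OrtPerfTuning)

# ...and are supplied per run, alongside the input model.
output = engine.run(input_model_config, accelerator_specs, output_dir=output_dir)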