diff --git a/.github/workflows/job_onnx_models_tests.yml b/.github/workflows/job_onnx_models_tests.yml
index 19bf3b23482b89..5cf311d52e193b 100644
--- a/.github/workflows/job_onnx_models_tests.yml
+++ b/.github/workflows/job_onnx_models_tests.yml
@@ -95,7 +95,7 @@ jobs:
       - name: Install Python tests dependencies
         run: |
           # To enable pytest parallel features
-          python3 -m pip install pytest-xdist[psutil] pytest-forked
+          python3 -m pip install pytest-xdist[psutil] pytest-forked pytest-randomly
       - name: ONNX Models Tests
         run: python3 -m pytest --backend="CPU" --model_zoo_dir="${MODELS_SHARE_PATH}" ${INSTALL_TEST_DIR}/onnx/tests/tests_python/test_zoo_models.py -v -n auto --forked -k 'not _cuda' --model_zoo_xfail
diff --git a/src/plugins/auto/src/compiled_model.cpp b/src/plugins/auto/src/compiled_model.cpp
index d4d781e635a9a3..d0e4a47bb8f273 100644
--- a/src/plugins/auto/src/compiled_model.cpp
+++ b/src/plugins/auto/src/compiled_model.cpp
@@ -63,7 +63,7 @@ ov::AnyMap ov::auto_plugin::CompiledModel::get_device_supported_properties(AutoC
     OPENVINO_ASSERT(context.m_compiled_model);
     auto device_supported_properties = context.m_compiled_model->get_property(ov::supported_properties.name());
     for (auto&& property_name : device_supported_properties.as<std::vector<ov::PropertyName>>()) {
-        // for lto issue, explictly do the conversion here
+        // For LTO issue, explicitly do the conversion here
         std::string query_name = property_name;
         device_properties[property_name] = context.m_compiled_model->get_property(query_name);
     }
diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp
index 9569a9adb19f48..639da2fddde048 100644
--- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp
+++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp
@@ -130,7 +130,7 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelWithRemoteContextTe
 }

 const std::vector<plugin_compile_model_param> plugin_compile_model_param_test = {
-    // Case 1: explict apply batch size by config of AUTO_BATCH_DEVICE_CONFIG
+    // Case 1: explicitly apply batch size by config of AUTO_BATCH_DEVICE_CONFIG
     plugin_compile_model_param{{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT},
                                 {ov::optimal_batch_size.name(), static_cast<unsigned int>(16)},
                                 {ov::hint::num_requests(12)},
diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp
index 6f2ec418adac7f..6fc3b6287f918e 100644
--- a/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/unit/test_cases/concatenation_gpu_test.cpp
@@ -1620,7 +1620,7 @@ INSTANTIATE_TEST_SUITE_P(smoke,
                          concat_gpu::PrintToStringParamName);

 template <typename Type>
-struct concat_gpu_4d_explict : public concat_gpu {
+struct concat_gpu_4d_explicit : public concat_gpu {
 public:
     cldnn::memory::ptr run_concat_network(std::vector<std::vector<std::vector<std::vector<std::vector<Type>>>>> input, format::type fmt, ExecutionConfig config) {
         auto data_type = ov::element::from<Type>();
@@ -1757,7 +1757,7 @@ struct concat_gpu_4d_explict : public concat_gpu {
 };

-using concat_no_implicit_gpu_onednn_4d_f16 = concat_gpu_4d_explict<ov::float16>;
+using concat_no_implicit_gpu_onednn_4d_f16 = concat_gpu_4d_explicit<ov::float16>;
 TEST_P(concat_no_implicit_gpu_onednn_4d_f16, input_order_opt_b_fs_yx_fsv16) {
     ASSERT_NO_FATAL_FAILURE(test(format::b_fs_yx_fsv16));
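
Note on the job_onnx_models_tests.yml hunk: pytest-randomly shuffles test execution order on every run and reseeds Python's random module (and numpy's, when present), printing the seed it used at session start. A failing order can be replayed with --randomly-seed=<seed>, and the plugin can be disabled with -p no:randomly. Combined with pytest-xdist's -n auto and --forked in the following step, this helps surface hidden ordering dependencies between the ONNX model zoo tests.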
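
Note on the compiled_model.cpp hunk: ov::PropertyName publicly derives from std::string (as I understand the OpenVINO headers), and the loop needs a plain std::string for the get_property() call; the reworded comment records that relying on the implicit derived-to-base conversion at the call site misbehaved under LTO, so the conversion is spelled out first. A minimal self-contained sketch of that pattern, using hypothetical stand-ins (PropertyNameStub, ModelStub) rather than the real OpenVINO classes:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for ov::PropertyName (assumption: publicly derived from std::string).
struct PropertyNameStub : std::string {
    using std::string::string;
};

// Stand-in for the compiled model being queried.
struct ModelStub {
    std::map<std::string, int> props;
    int get_property(const std::string& name) const { return props.at(name); }
};

int main() {
    ModelStub model{{{"PERF_COUNT", 2}, {"SUPPORTED_METRICS", 1}}};
    std::vector<PropertyNameStub> names = {PropertyNameStub("PERF_COUNT")};
    for (auto&& property_name : names) {
        // Mirrors the patched line: materialize a std::string instead of
        // relying on the derived-to-base conversion at the call site.
        std::string query_name = property_name;
        std::cout << query_name << " = " << model.get_property(query_name) << "\n";
    }
    return 0;
}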
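
Note on the plugin_compile_model_test.cpp hunk: the corrected comment describes the case where the batch size is applied explicitly through the batching plugin's configuration (historically the AUTO_BATCH_DEVICE_CONFIG key) rather than deduced from ov::optimal_batch_size. For reference, a user-level sketch of pinning an explicit batch via the BATCH virtual device notation; the model path is hypothetical:

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Hypothetical IR path; any readable model works here.
    auto model = core.read_model("model.xml");
    // "BATCH:GPU(16)" requests automatic batching on GPU with an explicit
    // batch size of 16, instead of letting the plugin deduce one.
    auto compiled = core.compile_model(model, "BATCH:GPU(16)");
    return 0;
}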