diff --git a/inference-engine/samples/benchmark_app/main.cpp b/inference-engine/samples/benchmark_app/main.cpp
index f317ffb63dc2c7..aea831a214f2fb 100644
--- a/inference-engine/samples/benchmark_app/main.cpp
+++ b/inference-engine/samples/benchmark_app/main.cpp
@@ -204,8 +204,7 @@ int main(int argc, char* argv[]) {
         else if (FLAGS_mode == "latency" || FLAGS_mode == "LATENCY")
             ov_perf_mode = CONFIG_VALUE(LATENCY);
         else if (!FLAGS_mode.empty())
-            throw std::logic_error("Performance mode " + ov_perf_mode + " is not recognized!");
-
+            throw std::logic_error("Performance mode " + ov_perf_mode + " is not recognized!");
         bool perf_counts = false;
 
         // Update config per device according to command line parameters
@@ -426,10 +425,9 @@ int main(int argc, char* argv[]) {
             std::cout << "OV_PERFORMANCE_MODE: " << ov_perf_mode << std::endl;
             // output of the actual settings that the mode produces (debugging)
             for (auto& device : devices) {
-                std::vector<std::string> supported_config_keys = ie.GetMetric(device,
-                                                                              METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+                std::vector<std::string> supported_config_keys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
                 std::cout << "Device: " << device << std::endl;
-                for (auto cfg : supported_config_keys) {
+                for (auto cfg : supported_config_keys) { 
                     std::cout << " {" << cfg << " , " << exeNetwork.GetConfig(cfg).as<std::string>() << " }" << std::endl;
                 }
             }
diff --git a/inference-engine/src/mkldnn_plugin/config.cpp b/inference-engine/src/mkldnn_plugin/config.cpp
index ea95247b3fa465..1a1a115f8548ac 100644
--- a/inference-engine/src/mkldnn_plugin/config.cpp
+++ b/inference-engine/src/mkldnn_plugin/config.cpp
@@ -27,13 +27,13 @@ Config::Config() {
 #if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
 #if defined(__APPLE__) || defined(_WIN32)
     // 'CORES' is not implemented for Win/MacOS; so the 'NUMA' is default
-    streamExecutorConfig._threadBindingType = InferenceEngine::IStreamsExecutor::NUMA;
-#endif
+    streamExecutorConfig._threadBindingType = InferenceEngine::IStreamsExecutor::NUMA; 
+    #endif
 
     if (getAvailableCoresTypes().size() > 1 /*Hybrid CPU*/) {
         streamExecutorConfig._threadBindingType = InferenceEngine::IStreamsExecutor::HYBRID_AWARE;
     }
-#endif
+    #endif
     if (!with_cpu_x86_bfloat16())
         enforceBF16 = false;