[GPU] Fixes port to 22.3 #17333

Closed
5 changes: 4 additions & 1 deletion src/bindings/c/tests/ov_compiled_model_test.cpp
@@ -117,7 +117,10 @@ TEST_P(ov_compiled_model, set_and_get_property) {

char* info = nullptr;
const char* key_0 = ov_property_key_available_devices;
if (ov_core_get_property(core, "GPU", key_0, &info) != ov_status_e::OK) {
EXPECT_EQ(ov_core_get_property(core, "GPU", key_0, &info), ov_status_e::OK);
EXPECT_STRNE(info, nullptr);
if (strlen(info) == 0) {
ov_free(info);
ov_core_free(core);
GTEST_SKIP();
}
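For context, a minimal sketch of the guarded query this hunk relies on (assumed OpenVINO C API usage; the helper name and header path are illustrative, not part of the diff). The property call is expected to succeed even when no GPU is installed, and an empty device string is what triggers the GTEST_SKIP() above.

#include <cstring>
#include "openvino/c/openvino.h"  // header path assumed

// Returns true only if the GPU plugin reports at least one available device.
static bool gpu_device_reported(ov_core_t* core) {
    char* info = nullptr;
    bool ok = ov_core_get_property(core, "GPU", ov_property_key_available_devices, &info) == ov_status_e::OK;
    bool has_device = ok && info != nullptr && std::strlen(info) > 0;
    if (info)
        ov_free(info);
    return has_device;
}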
@@ -24,11 +24,29 @@ enum class engine_types : int32_t {
ocl,
};

inline std::ostream& operator<<(std::ostream& os, const engine_types& type) {
switch (type) {
case engine_types::ocl: os << "ocl"; break;
default: os << "unknown"; break;
}

return os;
}

/// @brief Defines available runtime types
enum class runtime_types : int32_t {
ocl,
};

inline std::ostream& operator<<(std::ostream& os, const runtime_types& type) {
switch (type) {
case runtime_types::ocl: os << "ocl"; break;
default: os << "unknown"; break;
}

return os;
}

/// @brief Defines available priority mode types
enum class priority_mode_types : int16_t {
disabled,
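A minimal usage sketch for the new stream operators (namespace and include path assumed; not taken from the diff). Streaming the enum values yields their textual names, which is what makes the engine and runtime types readable in the assert and log messages elsewhere in this change.

#include <sstream>
#include <string>

// Builds a human-readable description such as "engine=ocl, runtime=ocl".
std::string describe(cldnn::engine_types engine, cldnn::runtime_types runtime) {
    std::ostringstream oss;
    oss << "engine=" << engine << ", runtime=" << runtime;
    return oss.str();
}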
56 changes: 37 additions & 19 deletions src/plugins/intel_gpu/src/plugin/plugin.cpp
@@ -496,6 +496,8 @@ InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::ImportNetwork(std::istr

Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, Parameter>& options) const {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Plugin::GetConfig");
OPENVINO_ASSERT(!device_map.empty(), "[GPU] Can't get ", name, " property as no supported devices found or an error happened during devices query.\n"
"[GPU] Please check OpenVINO documentation for GPU drivers setup guide.\n");
Parameter result;

std::string device_id;
@@ -569,17 +571,9 @@ auto StringRightTrim = [](std::string string, std::string substring, bool case_s
Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, Parameter>& options) const {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Plugin::GetMetric");
GPU_DEBUG_GET_INSTANCE(debug_config);
std::string device_id = GetConfig(ov::device::id.name(), options);

auto iter = device_map.find(std::to_string(cldnn::device_query::device_id));
if (iter == device_map.end())
iter = device_map.find(device_id);
if (iter == device_map.end())
iter = device_map.begin();
auto device = iter->second;
auto device_info = device->get_info();
bool is_new_api = IsNewAPI();

// The metrics below don't depend on the device ID, so we should handle those
// earlier than querying actual ID to avoid exceptions when no devices are found
if (name == ov::supported_properties) {
return decltype(ov::supported_properties)::value_type {
// Metrics
@@ -636,12 +630,44 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
metrics.push_back(GPU_METRIC_KEY(EXECUTION_UNITS_COUNT));
metrics.push_back(GPU_METRIC_KEY(MEMORY_STATISTICS));
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics);
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::vector<std::string> configKeys;
Configs dummy_cfg;
dummy_cfg.CreateConfig("0");
for (auto opt : dummy_cfg.GetConfig("0").key_config_map) {
// Exclude new API properties
if (!Config::isNewApiProperty(opt.first))
configKeys.push_back(opt.first);
}
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (name == METRIC_KEY(AVAILABLE_DEVICES)) {
std::vector<std::string> availableDevices = { };
for (auto const& dev : device_map)
availableDevices.push_back(dev.first);
return decltype(ov::available_devices)::value_type {availableDevices};
} else if (name == ov::intel_gpu::device_total_mem_size) {
} else if (name == ov::caching_properties) {
std::vector<ov::PropertyName> cachingProperties;
cachingProperties.push_back(ov::PropertyName(ov::device::architecture.name(), PropertyMutability::RO));
cachingProperties.push_back(ov::PropertyName(ov::intel_gpu::execution_units_count.name(), PropertyMutability::RO));
cachingProperties.push_back(ov::PropertyName(ov::intel_gpu::driver_version.name(), PropertyMutability::RO));
cachingProperties.push_back(ov::PropertyName(ov::hint::inference_precision.name(), PropertyMutability::RW));
return decltype(ov::caching_properties)::value_type(cachingProperties);
} else if (name == METRIC_KEY(IMPORT_EXPORT_SUPPORT)) {
IE_SET_METRIC_RETURN(IMPORT_EXPORT_SUPPORT, true);
}

std::string device_id = GetConfig(ov::device::id.name(), options);

auto iter = device_map.find(std::to_string(cldnn::device_query::device_id));
if (iter == device_map.end())
iter = device_map.find(device_id);
if (iter == device_map.end())
iter = device_map.begin();
auto device = iter->second;
auto device_info = device->get_info();
bool is_new_api = IsNewAPI();

if (name == ov::intel_gpu::device_total_mem_size) {
return decltype(ov::intel_gpu::device_total_mem_size)::value_type {device_info.max_global_mem_size};
} else if (name == ov::device::type) {
if (is_new_api) {
@@ -759,14 +785,6 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
auto deviceName = StringRightTrim(device_info.dev_name, "NEO", false);
deviceName += std::string(" (") + (device_info.dev_type == cldnn::device_type::discrete_gpu ? "dGPU" : "iGPU") + ")";
return decltype(ov::device::full_name)::value_type {deviceName};
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::vector<std::string> configKeys;
for (auto opt : _impl->m_configs.GetConfig(device_id).key_config_map) {
// Exclude new API properties
if (!Config::isNewApiProperty(opt.first))
configKeys.push_back(opt.first);
}
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (name == ov::device::capabilities) {
std::vector<std::string> capabilities;

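A rough caller-side sketch of what the reordering above enables (assumed ov::Core usage; not part of the diff). Device-independent metrics such as the available-device list are now answered before any device lookup, so on a machine without a working GPU driver the query below should return an empty vector instead of throwing.

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto gpu_devices = core.get_property("GPU", ov::available_devices);
    if (gpu_devices.empty()) {
        // No usable GPU reported: fall back to another device instead of failing.
    }
    return 0;
}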
4 changes: 0 additions & 4 deletions src/plugins/intel_gpu/src/runtime/device_query.cpp
@@ -27,9 +27,5 @@ device_query::device_query(engine_types engine_type,
}
default: throw std::runtime_error("Unsupported engine type in device_query");
}

if (_available_devices.empty()) {
throw std::runtime_error("No suitable devices found for requested engine and runtime types");
}
}
} // namespace cldnn
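With the throw removed, an empty device map becomes a valid outcome; a short sketch of the caller-side check that replaces the exception (the two-argument constructor call mirrors the tests below, the helper itself is illustrative):

#include "intel_gpu/runtime/device_query.hpp"

// True if at least one suitable OpenCL device was discovered.
bool has_gpu_device() {
    cldnn::device_query query(cldnn::engine_types::ocl, cldnn::runtime_types::ocl);
    auto devices = query.get_available_devices();  // may now be empty instead of throwing
    return !devices.empty();
}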
3 changes: 3 additions & 0 deletions src/plugins/intel_gpu/src/runtime/engine.cpp
@@ -280,6 +280,9 @@ std::shared_ptr<cldnn::engine> engine::create(engine_types engine_type,
device_query query(engine_type, runtime_type);
auto devices = query.get_available_devices();

OPENVINO_ASSERT(!devices.empty(), "[GPU] Can't create ", engine_type, " engine for ", runtime_type, " runtime as no suitable devices are found\n"
"[GPU] Please check OpenVINO documentation for GPU drivers setup guide.\n");

auto iter = devices.find(std::to_string(device_query::device_id));
auto& device = iter != devices.end() ? iter->second : devices.begin()->second;

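An illustrative try/catch around engine creation (the device-less create overload and the exact exception type are assumptions). With the assert in place, an empty device list surfaces as a descriptive error whose message names the engine and runtime types via the operator<< overloads added earlier.

#include <iostream>
#include "intel_gpu/runtime/engine.hpp"  // header path assumed

void create_gpu_engine_or_report() {
    try {
        auto engine = cldnn::engine::create(cldnn::engine_types::ocl, cldnn::runtime_types::ocl);
        (void)engine;
    } catch (const std::exception& ex) {
        // e.g. "[GPU] Can't create ocl engine for ocl runtime as no suitable devices are found"
        std::cerr << ex.what() << std::endl;
    }
}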
@@ -191,6 +191,10 @@ std::vector<device::ptr> ocl_device_detector::create_device_list() const {
cl_uint num_platforms = 0;
// Get number of platforms available
cl_int error_code = clGetPlatformIDs(0, NULL, &num_platforms);
if (num_platforms == 0 || error_code == CL_PLATFORM_NOT_FOUND_KHR) {
return {};
}

OPENVINO_ASSERT(error_code == CL_SUCCESS, create_device_error_msg, "[GPU] clGetPlatformIDs error code: ", std::to_string(error_code));
// Get platform list
std::vector<cl_platform_id> platform_ids(num_platforms);
@@ -210,10 +214,11 @@
supported_devices.emplace_back(std::make_shared<ocl_device>(device, cl::Context(device), id));
}
} catch (std::exception& ex) {
GPU_DEBUG_COUT << "Devices query/creation failed for " << platform.getInfo<CL_PLATFORM_NAME>() << ": " << ex.what() << std::endl;
GPU_DEBUG_COUT << "Platform is skipped" << std::endl;
continue;
}
}
OPENVINO_ASSERT(!supported_devices.empty(), create_device_error_msg);
return supported_devices;
}

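A standalone sketch of the two-step clGetPlatformIDs pattern the detector uses (generic OpenCL; CL_PLATFORM_NOT_FOUND_KHR is an ICD-loader extension status, typically -1001). A zero platform count or a not-found status is treated as "no devices" rather than a hard failure, which is what the early return above does.

#include <CL/cl.h>
#include <vector>

// Returns the available OpenCL platforms, or an empty list if no runtime is installed.
std::vector<cl_platform_id> query_platforms() {
    cl_uint num_platforms = 0;
    cl_int err = clGetPlatformIDs(0, nullptr, &num_platforms);
    if (err != CL_SUCCESS || num_platforms == 0)
        return {};
    std::vector<cl_platform_id> ids(num_platforms);
    err = clGetPlatformIDs(num_platforms, ids.data(), &num_platforms);
    if (err != CL_SUCCESS)
        return {};
    return ids;
}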
2 changes: 1 addition & 1 deletion src/plugins/intel_gpu/tests/gtest_main_gpu.cpp
@@ -30,7 +30,7 @@
#include <cstdio>
#include <string>

#include "../../intel_gpu/include/intel_gpu/runtime/device_query.hpp"
#include "intel_gpu/runtime/device_query.hpp"
#include "gtest/gtest.h"
#include "test_utils/test_utils.h"
#include "gflags/gflags.h"
@@ -75,6 +75,8 @@ template <typename T>
void start_cl_mem_check_2_inputs(bool is_caching_test) {
device_query query(engine_types::ocl, runtime_types::ocl);
auto devices = query.get_available_devices();
ASSERT_TRUE(!devices.empty());

auto iter = devices.find(std::to_string(device_query::device_id));
auto& device = iter != devices.end() ? iter->second : devices.begin()->second;
auto engine = engine::create(engine_types::ocl, runtime_types::ocl, device);
@@ -170,6 +172,7 @@ TEST(export_import_cl_mem_check, check_2_inputs) {
TEST(cl_mem_check, check_input) {
device_query query(engine_types::ocl, runtime_types::ocl);
auto devices = query.get_available_devices();
ASSERT_TRUE(!devices.empty());
auto iter = devices.find(std::to_string(device_query::device_id));
auto& device = iter != devices.end() ? iter->second : devices.begin()->second;
auto engine = engine::create(engine_types::ocl, runtime_types::ocl, device);
@@ -278,6 +281,7 @@ TEST(cl_mem_check, check_input) {
TEST(cl_mem_check, check_write_access_type) {
device_query query(engine_types::ocl, runtime_types::ocl);
auto devices = query.get_available_devices();
ASSERT_TRUE(!devices.empty());
cldnn::device::ptr device = devices.begin()->second;
for (auto& dev : devices) {
if (dev.second->get_info().dev_type == device_type::discrete_gpu)
@@ -312,6 +316,7 @@ TEST(cl_mem_check, check_write_access_type) {
TEST(cl_mem_check, check_read_access_type) {
device_query query(engine_types::ocl, runtime_types::ocl);
auto devices = query.get_available_devices();
ASSERT_TRUE(!devices.empty());
cldnn::device::ptr device = devices.begin()->second;
for (auto& dev : devices) {
if (dev.second->get_info().dev_type == device_type::discrete_gpu)
@@ -296,6 +296,7 @@ TEST(convert_color, nv12_to_rgb_two_planes_surface_u8) {

device_query query(engine_types::ocl, runtime_types::ocl);
auto devices = query.get_available_devices();
ASSERT_TRUE(!devices.empty());
auto iter = devices.find(std::to_string(device_query::device_id));
auto& device = iter != devices.end() ? iter->second : devices.begin()->second;
auto engine = engine::create(engine_types::ocl, runtime_types::ocl, device);
@@ -376,6 +377,7 @@ TEST(convert_color, nv12_to_rgb_single_plane_surface_u8) {

device_query query(engine_types::ocl, runtime_types::ocl);
auto devices = query.get_available_devices();
ASSERT_TRUE(!devices.empty());
auto iter = devices.find(std::to_string(device_query::device_id));
auto& device = iter != devices.end() ? iter->second : devices.begin()->second;
auto engine = engine::create(engine_types::ocl, runtime_types::ocl, device);
@@ -532,6 +534,7 @@ void test_convert_color_i420_to_rgb_three_planes_surface_u8(bool is_caching_test

device_query query(engine_types::ocl, runtime_types::ocl);
auto devices = query.get_available_devices();
ASSERT_TRUE(!devices.empty());
auto iter = devices.find(std::to_string(device_query::device_id));
auto& device = iter != devices.end() ? iter->second : devices.begin()->second;
auto engine = engine::create(engine_types::ocl, runtime_types::ocl, device);
1 change: 0 additions & 1 deletion src/plugins/intel_gpu/tests/test_cases/mem_perf_test.cpp
@@ -8,7 +8,6 @@
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/activation.hpp>
#include <intel_gpu/primitives/data.hpp>
#include <intel_gpu/runtime/device_query.hpp>

static size_t img_size = 800;
static std::string kernel_code =