Skip to content

Commit

Permalink
Replaced the impl factory API so factories receive kernel_impl_params as a std::shared_ptr instead of a const reference
Browse files Browse the repository at this point in the history
  • Loading branch information
yeonbok committed Jul 21, 2022
1 parent eb35673 commit feced4e
Show file tree
Hide file tree
Showing 92 changed files with 293 additions and 293 deletions.
2 changes: 1 addition & 1 deletion src/plugins/intel_gpu/src/graph/impls/common/condition.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ struct condition_impl : typed_primitive_impl<condition> {
return ev;
}

static primitive_impl* create(const condition_node& arg, const kernel_impl_params&) { return new condition_impl(arg); }
static primitive_impl* create(const condition_node& arg, std::shared_ptr<kernel_impl_params>) { return new condition_impl(arg); }

void init_kernels() override {}

Expand Down
2 changes: 1 addition & 1 deletion src/plugins/intel_gpu/src/graph/impls/common/loop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,7 @@ struct loop_impl : typed_primitive_impl<loop> {
return ev;
}

static primitive_impl* create(const loop_node& arg, const kernel_impl_params&) { return new loop_impl(arg); }
static primitive_impl* create(const loop_node& arg, std::shared_ptr<kernel_impl_params>) { return new loop_impl(arg); }
};

namespace detail {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,13 @@ class wait_for_events_impl : public primitive_impl {

bool validate(const primitive_inst&) const override { return true; }

static primitive_impl* create_data(const data_node& data, const kernel_impl_params&) { return new wait_for_events_impl(data); }
static primitive_impl* create_data(const data_node& data, std::shared_ptr<kernel_impl_params>) { return new wait_for_events_impl(data); }

static primitive_impl* create_input_layout(const input_layout_node& input, const kernel_impl_params&) {
static primitive_impl* create_input_layout(const input_layout_node& input, std::shared_ptr<kernel_impl_params>) {
return new wait_for_events_impl(input);
}

static primitive_impl* create_prior_box(const prior_box_node& prior_box, const kernel_impl_params&) {
static primitive_impl* create_prior_box(const prior_box_node& prior_box, std::shared_ptr<kernel_impl_params>) {
// This primitive is being executed on CPU during network compilation.
return new wait_for_events_impl(prior_box);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -833,7 +833,7 @@ struct detection_output_impl : typed_primitive_impl<detection_output> {

void init_kernels() override {}

static primitive_impl* create(const detection_output_node& arg, const kernel_impl_params&) { return new detection_output_impl(arg); }
static primitive_impl* create(const detection_output_node& arg, std::shared_ptr<kernel_impl_params>) { return new detection_output_impl(arg); }
};

namespace detail {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -401,7 +401,7 @@ struct non_max_suppression_impl : typed_primitive_impl<non_max_suppression> {
return ev;
}

static primitive_impl* create(const non_max_suppression_node&, const kernel_impl_params&) {
static primitive_impl* create(const non_max_suppression_node&, std::shared_ptr<kernel_impl_params>) {
return new non_max_suppression_impl();
}
void init_kernels() override {}
Expand Down
4 changes: 2 additions & 2 deletions src/plugins/intel_gpu/src/graph/impls/cpu/proposal.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -429,8 +429,8 @@ struct proposal_impl : typed_primitive_impl<proposal> {

void init_kernels() override {}

static primitive_impl* create(const proposal_node& arg, const kernel_impl_params& impl_param) {
const layout& l = impl_param.input_layouts[2];
static primitive_impl* create(const proposal_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
const layout& l = impl_param->input_layouts[2];
const size_t count = l.feature() == 1 ? static_cast<size_t>(l.batch()) : static_cast<size_t>(l.feature());

// Supported image_info sizes and components meaning:
Expand Down
16 changes: 8 additions & 8 deletions src/plugins/intel_gpu/src/graph/impls/implementation_map.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -154,11 +154,11 @@ class implementation_map {
public:
using key_builder = implementation_key<primitive_kind>;
using key_type = typename key_builder::type;
using factory_type = std::function<primitive_impl*(const typed_program_node<primitive_kind>&, const kernel_impl_params&)>;
using factory_type = std::function<primitive_impl*(const typed_program_node<primitive_kind>&, std::shared_ptr<kernel_impl_params>)>;
using map_type = singleton_map<impl_types, std::pair<std::set<key_type>, factory_type>>;

static factory_type get(const kernel_impl_params& impl_param, impl_types preferred_impl_type) {
auto key = key_builder()(impl_param.input_layouts[0]);
static factory_type get(std::shared_ptr<kernel_impl_params> impl_param, impl_types preferred_impl_type) {
auto key = key_builder()(impl_param->input_layouts[0]);
for (auto& kv : map_type::instance()) {
impl_types impl_type = kv.first;
if ((preferred_impl_type & impl_type) != impl_type)
Expand All @@ -173,20 +173,20 @@ class implementation_map {
target_impl_type_ss << preferred_impl_type;
throw std::runtime_error(std::string("implementation_map for ") + typeid(primitive_kind).name() +
" could not find any implementation to match key: " +
get_key_name(key) + ", impl_type: " + target_impl_type_ss.str() + ", node_id: " + impl_param.desc->id);
get_key_name(key) + ", impl_type: " + target_impl_type_ss.str() + ", node_id: " + impl_param->desc->id);
}

// check if for a given engine and type there exist an implementation
static bool check(const typed_program_node<primitive_kind>& primitive, const kernel_impl_params& impl_params) {
static bool check(const typed_program_node<primitive_kind>& primitive, std::shared_ptr<kernel_impl_params> impl_params) {
impl_types target_impl_type = primitive.get_preferred_impl_type();
auto key = key_builder()(impl_params.input_layouts[0]);
auto key = key_builder()(impl_params->input_layouts[0]);
return check_key(target_impl_type, key);
}

// check if there exists a kernel implementation of a primitive with output set it primitive's output layout
static bool check_io_eq(const typed_program_node<primitive_kind>& primitive, const kernel_impl_params& impl_params) {
static bool check_io_eq(const typed_program_node<primitive_kind>& primitive, std::shared_ptr<kernel_impl_params> impl_params) {
impl_types target_impl_type = primitive.get_preferred_impl_type();
auto key = key_builder()(impl_params.output_layout);
auto key = key_builder()(impl_params->output_layout);
return check_key(target_impl_type, key);
}

Expand Down
8 changes: 4 additions & 4 deletions src/plugins/intel_gpu/src/graph/impls/ocl/activation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,17 +30,17 @@ struct activation_impl : typed_primitive_impl_ocl<activation> {

return args;
}
static primitive_impl* create(const activation_node& arg, const kernel_impl_params& impl_param) {
static primitive_impl* create(const activation_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
const auto& prim = arg.get_primitive();
auto activation_params = get_default_params<kernel_selector::activation_params>(impl_param);
auto activation_params = get_default_params<kernel_selector::activation_params>(*impl_param);
auto activation_optional_params =
get_default_optional_params<kernel_selector::activation_optional_params>(arg.get_program());

convert_new_activation_func(prim, activation_params.activations);

if (arg.is_parameterized()) {
const auto& slope_layout = impl_param.input_layouts[1];
const auto& output_layout = impl_param.output_layout;
const auto& slope_layout = impl_param->input_layouts[1];
const auto& output_layout = impl_param->output_layout;

const auto params_num =
kernel_selector::GetActivationAdditionalParamsNumber(activation_params.activations[0].function);
Expand Down
6 changes: 3 additions & 3 deletions src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ struct arg_max_min_impl : typed_primitive_impl_ocl<arg_max_min> {
}

public:
static primitive_impl* create(const arg_max_min_node& arg, const kernel_impl_params impl_param) {
static primitive_impl* create(const arg_max_min_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
const auto& primitive = arg.get_primitive();
const auto& axis = primitive->axis;
const auto& top_k = primitive->top_k;
Expand All @@ -44,7 +44,7 @@ struct arg_max_min_impl : typed_primitive_impl_ocl<arg_max_min> {
const auto& values_first = primitive->values_first;
const auto& outputs_num = primitive->input.size() == 3 ? 2 : 1; // second output passed as input for TOP_K layer

auto argm_params = get_default_params<kernel_selector::arg_max_min_params>(impl_param);
auto argm_params = get_default_params<kernel_selector::arg_max_min_params>(*impl_param);
auto argm_optional_params =
get_default_optional_params<kernel_selector::arg_max_min_optional_params>(arg.get_program());

Expand Down Expand Up @@ -83,7 +83,7 @@ struct arg_max_min_impl : typed_primitive_impl_ocl<arg_max_min> {
argm_params.argMaxMinSortType = kernel_selector::argm_sort::INDEX;

if (outputs_num == 2) {
argm_params.inputs.push_back(convert_data_tensor(impl_param.input_layouts[2]));
argm_params.inputs.push_back(convert_data_tensor(impl_param->input_layouts[2]));
}

argm_params.values_first = values_first;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,9 @@ struct average_unpooling_impl : typed_primitive_impl_ocl<average_unpooling> {
}

public:
static primitive_impl* create(const average_unpooling_node& arg, const kernel_impl_params& impl_param) {
static primitive_impl* create(const average_unpooling_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
auto primitive = arg.get_primitive();
auto average_unpooling_params = get_default_params<kernel_selector::average_unpooling_params>(impl_param);
auto average_unpooling_params = get_default_params<kernel_selector::average_unpooling_params>(*impl_param);
auto average_unpooling_optional_params =
get_default_optional_params<kernel_selector::average_unpooling_optional_params>(arg.get_program());
auto& params = average_unpooling_params;
Expand Down
4 changes: 2 additions & 2 deletions src/plugins/intel_gpu/src/graph/impls/ocl/batch_to_space.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,9 @@ struct batch_to_space_impl : typed_primitive_impl_ocl<batch_to_space> {
}

public:
static primitive_impl* create(const batch_to_space_node& arg, const kernel_impl_params& impl_param) {
static primitive_impl* create(const batch_to_space_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
auto primitive = arg.get_primitive();
auto batch_to_space_params = get_default_params<kernel_selector::batch_to_space_params>(impl_param);
auto batch_to_space_params = get_default_params<kernel_selector::batch_to_space_params>(*impl_param);
auto batch_to_space_optional_params =
get_default_optional_params<kernel_selector::batch_to_space_optional_params>(arg.get_program());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,9 @@ struct binary_convolution_impl : typed_primitive_impl_ocl<binary_convolution> {
int32_t get_split() const override { return _outer.get_split(); }

public:
static primitive_impl* create(const binary_convolution_node& arg, const kernel_impl_params impl_param) {
static primitive_impl* create(const binary_convolution_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
const auto& primitive = arg.get_primitive();
const auto& weights_layout = impl_param.weights_layout.convert_to_weights_layout(false);
const auto& weights_layout = impl_param->weights_layout.convert_to_weights_layout(false);
const auto& weights_size = weights_layout.size;

const auto& split = primitive->split();
Expand All @@ -74,9 +74,9 @@ struct binary_convolution_impl : typed_primitive_impl_ocl<binary_convolution> {
const auto depthwise_separable_opt = arg.get_depthwise_sep_opt();
const auto actual_split = depthwise_separable_opt ? (decltype(split))1 : split;

assert(impl_param.output_layout.feature() / primitive->split() == weights_layout.batch());
assert(impl_param->output_layout.feature() / primitive->split() == weights_layout.batch());

auto conv_params = get_weights_bias_default_params<kernel_selector::binary_convolution_params>(impl_param, actual_split);
auto conv_params = get_weights_bias_default_params<kernel_selector::binary_convolution_params>(*impl_param, actual_split);
auto conv_optional_params = get_default_weights_bias_optional_params<kernel_selector::binary_convolution_optional_params>(
arg.get_program());

Expand Down
4 changes: 2 additions & 2 deletions src/plugins/intel_gpu/src/graph/impls/ocl/border.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,10 @@ struct border_impl : typed_primitive_impl_ocl<border> {
return make_unique<border_impl>(*this);
}

static primitive_impl* create(const border_node& arg, const kernel_impl_params& impl_param) {
static primitive_impl* create(const border_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
auto desc = arg.get_primitive();

auto b_params = get_default_params<kernel_selector::border_params>(impl_param, 1);
auto b_params = get_default_params<kernel_selector::border_params>(*impl_param, 1);
auto b_optional_params =
get_default_optional_params<kernel_selector::border_optional_params>(arg.get_program());

Expand Down
6 changes: 3 additions & 3 deletions src/plugins/intel_gpu/src/graph/impls/ocl/broadcast.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@ struct broadcast_impl : typed_primitive_impl_ocl<broadcast> {
return make_unique<broadcast_impl>(*this);
}

static primitive_impl* create(const broadcast_node& arg, const kernel_impl_params& impl_param) {
static primitive_impl* create(const broadcast_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
const auto& primitive = arg.get_primitive();
auto bc_params = get_default_params<kernel_selector::broadcast_params>(impl_param, 1);
auto bc_params = get_default_params<kernel_selector::broadcast_params>(*impl_param, 1);
auto bc_optional_params =
get_default_optional_params<kernel_selector::broadcast_optional_params>(arg.get_program());

const auto format = impl_param.output_layout.format;
const auto format = impl_param->output_layout.format;
size_t max_axes_num = format.dimension();

const auto& broadcast_axes = primitive->broadcast_axes;
Expand Down
8 changes: 4 additions & 4 deletions src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -69,22 +69,22 @@ struct concatenation_impl : typed_primitive_impl_ocl<concatenation> {
}

public:
static primitive_impl* create(const concatenation_node& arg, const kernel_impl_params& impl_param) {
static primitive_impl* create(const concatenation_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
if (arg.can_be_optimized()) {
return new concatenation_impl(arg, {});
}
const auto& primitive = arg.get_primitive();
auto concat_params = get_default_params<kernel_selector::concatenation_params>(impl_param);
auto concat_params = get_default_params<kernel_selector::concatenation_params>(*impl_param);
auto concat_optional_params = get_default_optional_params<kernel_selector::concatenation_optional_params>(arg.get_program());
auto axis = primitive->axis;

concat_params.inputs.resize(arg.inputs_count());
for (size_t i = 0; i < arg.inputs_count(); ++i) {
const layout& input_layout = impl_param.input_layouts[i];
const layout& input_layout = impl_param->input_layouts[i];
concat_params.inputs[i] = convert_data_tensor(input_layout);
}

concat_params.axis = convert_axis(axis, impl_param.output_layout.get_rank());
concat_params.axis = convert_axis(axis, impl_param->output_layout.get_rank());
concat_optional_params.kernelPerInput = true;

auto& kernel_selector = kernel_selector::concatenation_kernel_selector::Instance();
Expand Down
8 changes: 4 additions & 4 deletions src/plugins/intel_gpu/src/graph/impls/ocl/convert_color.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,15 +31,15 @@ struct convert_color_impl : typed_primitive_impl_ocl<convert_color> {
}

public:
static primitive_impl* create(const convert_color_node& arg, const kernel_impl_params& impl_param) {
static primitive_impl* create(const convert_color_node& arg, std::shared_ptr<kernel_impl_params> impl_param) {
auto primitive = arg.get_primitive();

auto convert_color_params = get_default_params<kernel_selector::convert_color_params>(impl_param);
auto convert_color_params = get_default_params<kernel_selector::convert_color_params>(*impl_param);
auto convert_color_optional_params =
get_default_optional_params<kernel_selector::convert_color_optional_params>(arg.get_program());

for (size_t i = 1; i < impl_param.input_layouts.size(); ++i) {
convert_color_params.inputs.push_back(convert_data_tensor(impl_param.input_layouts[i]));
for (size_t i = 1; i < impl_param->input_layouts.size(); ++i) {
convert_color_params.inputs.push_back(convert_data_tensor(impl_param->input_layouts[i]));
}

convert_color_params.input_color_format = static_cast<kernel_selector::color_format>(primitive->input_color_format);
Expand Down
Loading

0 comments on commit feced4e

Please sign in to comment.