Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use onnx_name field instead of op_name in MIGRAPHX_THROW as it is more informative and always defined #3532

Merged
merged 1 commit into from
Oct 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions src/include/migraphx/tune_axis.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,13 +30,13 @@
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

inline int tune_axis(int n_dim, int axis, const std::string& op_name = "OPERATOR")
inline int tune_axis(int n_dim, int axis, const std::string& name = "OPERATOR")
{
if(axis < 0)
axis += n_dim;

if(axis < 0 or axis >= n_dim)
MIGRAPHX_THROW(to_upper(op_name) + ": axis is out of range.");
MIGRAPHX_THROW(to_upper(name) + ": axis is out of range.");

Check warning on line 39 in src/include/migraphx/tune_axis.hpp

View check run for this annotation

Codecov / codecov/patch

src/include/migraphx/tune_axis.hpp#L39

Added line #L39 was not covered by tests

return axis;
}
Expand Down
2 changes: 1 addition & 1 deletion src/onnx/include/migraphx/onnx/padding.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ void cal_auto_padding_size(onnx_parser::node_info info,
const std::vector<std::size_t>& in_lens,
std::vector<int64_t>& paddings);

void check_padding_mode(const onnx_parser::node_info& info, const std::string& op_name);
void check_padding_mode(const onnx_parser::node_info& info, const std::string& onnx_name);

void tune_padding_size(const value& v,
std::vector<int64_t>& padding,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ namespace onnx {

std::vector<instruction_ref>
transform_quantize_dequantize_linear_inputs(const onnx_parser::node_info& info,
const std::string& op_name,
const std::string& onnx_name,
int block_size,
int axis,
std::vector<instruction_ref> args);
Expand Down
4 changes: 2 additions & 2 deletions src/onnx/padding.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -81,15 +81,15 @@ bool is_asym_padding(const std::vector<int64_t>& padding)
return false;
}

void check_padding_mode(const onnx_parser::node_info& info, const std::string& op_name)
void check_padding_mode(const onnx_parser::node_info& info, const std::string& onnx_name)
{
// ensure pads available only when auto_pad is "NOT_SET"
if(contains(info.attributes, "pads") and contains(info.attributes, "auto_pad"))
{
auto s = info.attributes.at("auto_pad").s();
if(to_upper(s) != "NOTSET")
{
MIGRAPHX_THROW("PARSE_" + op_name +
MIGRAPHX_THROW("PARSE_" + to_upper(onnx_name) +
": auto_pad and padding cannot be specified simultaneously");
}
}
Expand Down
4 changes: 2 additions & 2 deletions src/onnx/parse_conv_transpose.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ struct parse_conv_transpose : op_parser<parse_conv_transpose>
{
std::vector<op_desc> operators() const { return {{"ConvTranspose"}}; }

instruction_ref parse(const op_desc& /*opd*/,
instruction_ref parse(const op_desc& opd,
const onnx_parser& parser,
onnx_parser::node_info info,
std::vector<instruction_ref> args) const
Expand All @@ -60,7 +60,7 @@ struct parse_conv_transpose : op_parser<parse_conv_transpose>
auto kdims = l0->get_shape().ndim() - 2;

// ensure pads available only when auto_pad is "NOT_SET"
check_padding_mode(info, "CONV_TRANSPOSE");
check_padding_mode(info, opd.onnx_name);

if(contains(info.attributes, "pads"))
{
Expand Down
2 changes: 1 addition & 1 deletion src/onnx/parse_convolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ struct parse_convolution : op_parser<parse_convolution>
auto kdims = in_lens.size() - 2;

// ensure pads available only when auto_pad is "NOT_SET"
check_padding_mode(info, "CONV");
check_padding_mode(info, opd.onnx_name);

if(contains(info.attributes, "strides"))
{
Expand Down
4 changes: 2 additions & 2 deletions src/onnx/parse_dequantizelinear.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,8 @@ struct parse_dequantizelinear : op_parser<parse_dequantizelinear>
if(contains(info.attributes, "block_size"))
block_size = info.attributes.at("block_size").i();

args =
transform_quantize_dequantize_linear_inputs(info, opd.op_name, block_size, axis, args);
args = transform_quantize_dequantize_linear_inputs(
info, opd.onnx_name, block_size, axis, args);

return info.add_instruction(make_op("dequantizelinear"), args);
}
Expand Down
2 changes: 1 addition & 1 deletion src/onnx/parse_gather_elements.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ struct parse_gather_elements : op_parser<parse_gather_elements>
}

int n_rank = data_s.lens().size();
int tuned_axis = tune_axis(n_rank, axis, opd.op_name);
int tuned_axis = tune_axis(n_rank, axis, opd.onnx_name);

auto axis_stride = data_s.strides()[tuned_axis];
int64_t data_elem_num = data_s.elements();
Expand Down
2 changes: 1 addition & 1 deletion src/onnx/parse_instancenorm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ struct parse_instancenorm : op_parser<parse_instancenorm>
auto scale = args[1];
auto bias = args[2];
if(not contains(valid_types, dtype))
MIGRAPHX_THROW(opd.op_name + ": invalid output type: " + std::to_string(dtype) +
MIGRAPHX_THROW(opd.onnx_name + ": invalid output type: " + std::to_string(dtype) +
". Valid types are 1 (float), 10 (half), and 11 (double).");

auto ndims = x->get_shape().ndim();
Expand Down
12 changes: 6 additions & 6 deletions src/onnx/parse_qlinearbinary.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,10 @@
}

// basic type checking for binary QLinear Operator
void check_inputs(const std::vector<instruction_ref>& args, const std::string& op_name) const
void check_inputs(const std::vector<instruction_ref>& args, const std::string& onnx_name) const
{
if(args.size() < 7)
MIGRAPHX_THROW(op_name + ": missing inputs");
MIGRAPHX_THROW(onnx_name + ": missing inputs");

Check warning on line 113 in src/onnx/parse_qlinearbinary.cpp

View check run for this annotation

Codecov / codecov/patch

src/onnx/parse_qlinearbinary.cpp#L113

Added line #L113 was not covered by tests

const auto& in_a = args[0];
const auto& in_b = args[3];
Expand All @@ -121,19 +121,19 @@
auto type_a = sh_a.type();
auto type_b = sh_b.type();
if(type_a != migraphx::shape::int8_type and type_a != migraphx::shape::uint8_type)
MIGRAPHX_THROW(op_name + ": unsupported input type");
MIGRAPHX_THROW(onnx_name + ": unsupported input type");

Check warning on line 124 in src/onnx/parse_qlinearbinary.cpp

View check run for this annotation

Codecov / codecov/patch

src/onnx/parse_qlinearbinary.cpp#L124

Added line #L124 was not covered by tests
if(type_b != migraphx::shape::int8_type and type_b != migraphx::shape::uint8_type)
MIGRAPHX_THROW(op_name + ": unsupported input type");
MIGRAPHX_THROW(onnx_name + ": unsupported input type");

Check warning on line 126 in src/onnx/parse_qlinearbinary.cpp

View check run for this annotation

Codecov / codecov/patch

src/onnx/parse_qlinearbinary.cpp#L126

Added line #L126 was not covered by tests
if(type_a != type_b)
MIGRAPHX_THROW(op_name + ": mismatched input types");
MIGRAPHX_THROW(onnx_name + ": mismatched input types");

Check warning on line 128 in src/onnx/parse_qlinearbinary.cpp

View check run for this annotation

Codecov / codecov/patch

src/onnx/parse_qlinearbinary.cpp#L128

Added line #L128 was not covered by tests
}

instruction_ref parse(const op_desc& opd,
const onnx_parser& /*parser*/,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
check_inputs(args, opd.op_name);
check_inputs(args, opd.onnx_name);

// A
const auto& in_a = args[0];
Expand Down
9 changes: 5 additions & 4 deletions src/onnx/parse_qlinearconv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,8 @@ struct parse_qlinearconv : op_parser<parse_qlinearconv>
}

// process all attributes of QLinearConv Operator..
value process_attributes(const onnx_parser& parser,
value process_attributes(const op_desc& opd,
const onnx_parser& parser,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
Expand All @@ -130,7 +131,7 @@ struct parse_qlinearconv : op_parser<parse_qlinearconv>

size_t kdims = in_x->get_shape().ndim() - 2;

check_padding_mode(info, "QLINEARCONV");
check_padding_mode(info, opd.onnx_name);

values["stride"] = std::vector<int>(kdims, 1);
values["dilation"] = std::vector<int>(kdims, 1);
Expand Down Expand Up @@ -195,14 +196,14 @@ struct parse_qlinearconv : op_parser<parse_qlinearconv>
return info.add_instruction(migraphx::make_op("add"), conv_instr, f_bias);
};

instruction_ref parse(const op_desc& /* opd */,
instruction_ref parse(const op_desc& opd,
const onnx_parser& parser,
const onnx_parser::node_info& info,
const std::vector<instruction_ref>& args) const
{
check_inputs(args);

auto values = process_attributes(parser, info, args);
auto values = process_attributes(opd, parser, info, args);

// input: quantized x, scale, zero_pt
const instruction_ref& in_x = args[0];
Expand Down
4 changes: 2 additions & 2 deletions src/onnx/parse_quantizelinear.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -88,8 +88,8 @@ struct parse_quantizelinear : op_parser<parse_quantizelinear>
+", y_zero_point type: " + to_string(args[2]->get_shape().type()));
}

args =
transform_quantize_dequantize_linear_inputs(info, opd.op_name, block_size, axis, args);
args = transform_quantize_dequantize_linear_inputs(
info, opd.onnx_name, block_size, axis, args);

if(parser.opset_version < 19)
{
Expand Down
6 changes: 3 additions & 3 deletions src/onnx/parse_randomnormal_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ struct parse_randomnormal_ops : op_parser<parse_randomnormal_ops>
}
shape::type_t out_type = get_type(dtype);
if(not contains(valid_types, out_type))
MIGRAPHX_THROW(opd.op_name + ": invalid output type: " + std::to_string(dtype) +
MIGRAPHX_THROW(opd.onnx_name + ": invalid output type: " + std::to_string(dtype) +
". Valid types are 1 (float), 10 (half), and 11 (double).");

float mean = 0.0;
Expand All @@ -80,15 +80,15 @@ struct parse_randomnormal_ops : op_parser<parse_randomnormal_ops>
// output type and shape are the same as the input's by default
// dtype is used instead when attribute is set
if(not contains(valid_types, args[0]->get_shape().type()))
MIGRAPHX_THROW(opd.op_name + ": invalid output type: " +
MIGRAPHX_THROW(opd.onnx_name + ": invalid output type: " +
std::to_string(args[0]->get_shape().type()) +
". Valid types are float, half, and double.");
out_shape =
use_dtype ? shape{out_type, args[0]->get_shape().lens()} : args[0]->get_shape();
}
else
{
MIGRAPHX_THROW(opd.op_name +
MIGRAPHX_THROW(opd.onnx_name +
": cannot deduce shape without shape attribute or argument.");
}

Expand Down
6 changes: 3 additions & 3 deletions src/onnx/parse_randomuniform_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ struct parse_randomuniform_ops : op_parser<parse_randomuniform_ops>
}
shape::type_t out_type = get_type(dtype);
if(not contains(valid_types, out_type))
MIGRAPHX_THROW(opd.op_name + ": invalid output type: " + std::to_string(dtype) +
MIGRAPHX_THROW(opd.onnx_name + ": invalid output type: " + std::to_string(dtype) +
". Valid types are 1 (float), 10 (half), and 11 (double).");

float high = 1.0;
Expand All @@ -80,15 +80,15 @@ struct parse_randomuniform_ops : op_parser<parse_randomuniform_ops>
// output type and shape are the same as the input by default
// dtype is used instead when attribute is set
if(not contains(valid_types, args[0]->get_shape().type()))
MIGRAPHX_THROW(opd.op_name + ": invalid output type: " +
MIGRAPHX_THROW(opd.onnx_name + ": invalid output type: " +
std::to_string(args[0]->get_shape().type()) +
". Valid types are float, half, and double.");
out_shape =
use_dtype ? shape{out_type, args[0]->get_shape().lens()} : args[0]->get_shape();
}
else
{
MIGRAPHX_THROW(opd.op_name +
MIGRAPHX_THROW(opd.onnx_name +
": cannot deduce shape without shape attribute or argument.");
}

Expand Down
14 changes: 7 additions & 7 deletions src/onnx/parse_resize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@
// at compile time). If true, we'll need to use Resize op.
static bool parse_args(const std::vector<instruction_ref>& args,
const std::vector<size_t>& in_lens,
const std::string& op_name,
const std::string& onnx_name,
std::vector<double>& vec_scale,
std::vector<std::size_t>& out_lens,
instruction_ref& r_arg)
Expand Down Expand Up @@ -168,7 +168,7 @@

if(out_lens.size() != in_lens.size())
{
MIGRAPHX_THROW("PARSE_" + op_name +
MIGRAPHX_THROW("PARSE_" + onnx_name +

Check warning on line 171 in src/onnx/parse_resize.cpp

View check run for this annotation

Codecov / codecov/patch

src/onnx/parse_resize.cpp#L171

Added line #L171 was not covered by tests
": specified output size's rank does not match input size");
}

Expand All @@ -195,7 +195,7 @@
return false;
}
}
MIGRAPHX_THROW("PARSE_" + op_name + ": no shapes or scales input provided");
MIGRAPHX_THROW("PARSE_" + onnx_name + ": no shapes or scales input provided");
}

struct parse_resize : op_parser<parse_resize>
Expand Down Expand Up @@ -264,7 +264,7 @@
if(contains(info.attributes, "exclude_outside") and
info.attributes.at("exclude_outside").i() == 1)
{
MIGRAPHX_THROW("PARSE_" + opd.op_name + ": exclude_outside 1 is not supported!");
MIGRAPHX_THROW("PARSE_" + opd.onnx_name + ": exclude_outside 1 is not supported!");

Check warning on line 267 in src/onnx/parse_resize.cpp

View check run for this annotation

Codecov / codecov/patch

src/onnx/parse_resize.cpp#L267

Added line #L267 was not covered by tests
}

// input data shape info
Expand All @@ -290,14 +290,14 @@
// Depending on the args, it *must* populate the `vec_scale`, and might populate
// `out_lens`
is_constant_scale_input =
not parse_args(args, in_lens, opd.op_name, vec_scale, out_lens, scales_sizes_arg);
not parse_args(args, in_lens, opd.onnx_name, vec_scale, out_lens, scales_sizes_arg);
}

if(is_constant_scale_input)
{
if(in_lens.size() != vec_scale.size())
{
MIGRAPHX_THROW("PARSE_" + opd.op_name +
MIGRAPHX_THROW("PARSE_" + opd.onnx_name +
": ranks of input and scale are different!");
}

Expand Down Expand Up @@ -346,7 +346,7 @@
// out_lens and other variables can't be populated if non-constant (runtime) size
// inputs.
if(not is_constant_scale_input)
MIGRAPHX_THROW("PARSE_" + opd.op_name +
MIGRAPHX_THROW("PARSE_" + opd.onnx_name +
": linear mode not supported for non-constant inputs");

shape out_s{in_s.type(), out_lens};
Expand Down
2 changes: 1 addition & 1 deletion src/onnx/parse_split.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ struct parse_split : op_parser<parse_split>

const auto& input_shape = args[0]->get_shape();
// axis over which the split occurs (split_axis)
int64_t tuned_axis = tune_axis(input_shape.ndim(), axis, opd.op_name);
int64_t tuned_axis = tune_axis(input_shape.ndim(), axis, opd.onnx_name);

auto split_axis_is_fixed = [&]() {
return input_shape.dyn_dims().at(tuned_axis).is_fixed();
Expand Down
2 changes: 1 addition & 1 deletion src/onnx/parse_unique.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ struct parse_unique : op_parser<parse_unique>
{
auto n_dim = args[0]->get_shape().ndim();
axis = parser.parse_value(info.attributes.at("axis")).at<int>();
axis = tune_axis(n_dim, *axis, opd.op_name);
axis = tune_axis(n_dim, *axis, opd.onnx_name);
}
migraphx::argument data_arg = args.back()->eval();

Expand Down
2 changes: 1 addition & 1 deletion src/onnx/pooling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ value handle_pooling_values(const op_desc& opd,
}

// ensure pads available only when auto_pad is "NOT_SET"
check_padding_mode(info, "POOLING");
check_padding_mode(info, opd.onnx_name);

return values;
}
Expand Down
15 changes: 8 additions & 7 deletions src/onnx/quantize_dequantize_linear.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ namespace onnx {

std::vector<instruction_ref>
transform_quantize_dequantize_linear_inputs(const onnx_parser::node_info& info,
const std::string& op_name,
const std::string& onnx_name,
int block_size,
int axis,
std::vector<instruction_ref> args)
Expand All @@ -57,10 +57,11 @@ transform_quantize_dequantize_linear_inputs(const onnx_parser::node_info& info,
// Per-axis granularity
else if(y_scale_rank == 1)
{
axis = tune_axis(x_rank, axis, op_name);
axis = tune_axis(x_rank, axis, onnx_name);
if(x_lens[axis] != y_scale_lens[0])
{
MIGRAPHX_THROW(op_name + ": For per axis granularity the length of y_scale (actual: " +
MIGRAPHX_THROW(onnx_name +
": For per axis granularity the length of y_scale (actual: " +
to_string(y_scale_lens[0]) + ") must be equal to size of x on axis " +
to_string(axis) + "(actual: " + to_string(x_lens[axis]) + ")");
}
Expand All @@ -73,11 +74,11 @@ transform_quantize_dequantize_linear_inputs(const onnx_parser::node_info& info,
// Blocked granularity
else
{
axis = tune_axis(x_rank, axis, op_name);
axis = tune_axis(x_rank, axis, onnx_name);

if(x_rank != y_scale_rank)
{
MIGRAPHX_THROW(op_name + ": x(rank: " + to_string(x_rank) +
MIGRAPHX_THROW(onnx_name + ": x(rank: " + to_string(x_rank) +
") and y_scale(rank: " + to_string(y_scale_rank) +
") must be of same rank for block granularity");
}
Expand All @@ -86,7 +87,7 @@ transform_quantize_dequantize_linear_inputs(const onnx_parser::node_info& info,
{
if(x_lens[i] != y_scale_lens[i] and i != axis)
{
MIGRAPHX_THROW(op_name + ": x(shape: " + to_string_range(x_lens) +
MIGRAPHX_THROW(onnx_name + ": x(shape: " + to_string_range(x_lens) +
") and y_scale(shape: " + to_string_range(y_scale_lens) +
") shapes may only differ along provided axis(" + to_string(axis) +
")");
Expand All @@ -103,7 +104,7 @@ transform_quantize_dequantize_linear_inputs(const onnx_parser::node_info& info,
if(block_size == 0)
block_size = block_size_min;
if(block_size < block_size_min or block_size > block_size_max)
MIGRAPHX_THROW(op_name + ": Block size(actual: " + to_string(block_size) +
MIGRAPHX_THROW(onnx_name + ": Block size(actual: " + to_string(block_size) +
") must be within range [" + to_string(block_size_min) + ", " +
to_string(block_size_max) + "]");

Expand Down
Loading