[MO] Move reverse input channels before mean/scale #9182

Merged 1 commit on Dec 14, 2021
@@ -59,11 +59,14 @@ Framework-agnostic parameters:
   --reverse_input_channels
                         Switch the input channels order from RGB to BGR (or
                         vice versa). Applied to original inputs of the model
Contributor: @nosovmik, why did we decide to keep "Applied to original inputs of the model..." instead of "Applied to user's input data..."?

Contributor: It is legacy behavior, and it is consistent with the model's documentation. E.g., if a model is RGB and its documentation states that means (120, 130, 140) shall be applied, but the user has BGR input, then the user only needs --mean_values (120,130,140) --reverse_input_channels, instead of reversing the values in their head and passing --mean_values (140,130,120) --reverse_input_channels.

Contributor: Let us merge it and continue this discussion offline.

-                        if and only if a number of channels equals 3. Applied
-                        after application of --mean_values and --scale_values
-                        options, so numbers in --mean_values and
-                        --scale_values go in the order of channels used in the
-                        original model.
+                        if and only if a number of channels equals 3.
+                        When --mean_values/--scale_values are also specified,
+                        reversing of channels will be applied to user's input
+                        data first, so that numbers in --mean_values and
+                        --scale_values go in the order of channels used in
+                        the original model. In other words, if both options are
+                        specified then the data flow in the model looks as following:
+                        Parameter -> ReverseInputChannels -> Mean/Scale apply -> the original body of the model.
   --log_level {CRITICAL,ERROR,WARN,WARNING,INFO,DEBUG,NOTSET}
                         Logger level
   --input INPUT         Quoted list of comma-separated input nodes names with
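For illustration (not part of the PR; the pixel and mean values are taken from the discussion above), a minimal numpy sketch of why the new order lets the user pass the model-documented RGB-order mean values even when feeding BGR data:

    import numpy as np

    # One BGR pixel of the user's input, and the model-documented RGB-order mean.
    bgr_pixel = np.array([140., 130., 120.])   # B, G, R
    mean_rgb = np.array([120., 130., 140.])    # as documented for the RGB model

    # New order: ReverseInputChannels first, then mean subtraction.
    rgb_pixel = bgr_pixel[::-1]                # [120., 130., 140.] in R, G, B order
    normalized = rgb_pixel - mean_rgb          # mean lines up with the model's channel order

    # Under the old order the mean was subtracted before reversal, so the user
    # had to pass the mentally reversed values: --mean_values (140,130,120).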
12 changes: 6 additions & 6 deletions tools/mo/openvino/tools/mo/back/preprocessing.py
@@ -372,6 +372,12 @@ def apply_preprocessing(ov_function: Model, argv: argparse.Namespace):
     else:
         prep.output(node_name).tensor().set_layout(Layout(layout_value['target_layout']))

+    # Apply reverse_input_channels
+    if need_reverse:
+        for name, _ in suitable_params_ric:
+            prep.input(name).preprocess().reverse_channels()
+            log.debug('reverse_input_channels pre-processing applied to {}'.format(name))
+
     for node_name, node_mean_scale_values in mean_scale_values.items():
         # Apply mean first, then scale
         if node_mean_scale_values['mean'] is not None:
@@ -380,12 +386,6 @@ def apply_preprocessing(ov_function: Model, argv: argparse.Namespace):
             prep.input(node_name).preprocess().scale(node_mean_scale_values['scale'])
             log.debug('Mean/Scale pre-processing applied to {}'.format(node_name))

-    # Apply reverse_input_channels
-    if need_reverse:
-        for name, _ in suitable_params_ric:
-            prep.input(name).preprocess().reverse_channels()
-            log.debug('reverse_input_channels pre-processing applied to {}'.format(name))
-
     # Apply pre-processing builder to a function
     ov_function = prep.build()
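The relocation above is the whole functional change: the reverse_channels() registration now precedes the mean/scale registrations, so the builder emits the channel reversal before the mean/scale nodes. A minimal sketch of the resulting call order (assuming the OpenVINO PrePostProcessor API used in this file; the import path and the input name 'data' are illustrative and may differ between releases):

    from openvino.preprocess import PrePostProcessor  # import path may vary by release

    # 'model' is assumed to be an openvino Model with a 3-channel input named 'data'.
    prep = PrePostProcessor(model)
    prep.input('data').preprocess().reverse_channels()    # registered first after this PR
    prep.input('data').preprocess().mean([120., 130., 140.])
    prep.input('data').preprocess().scale([58., 57., 57.])
    model = prep.build()  # Parameter -> ReverseInputChannels -> Mean -> Scale -> original body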
10 changes: 6 additions & 4 deletions tools/mo/openvino/tools/mo/utils/cli_parser.py
@@ -271,10 +271,12 @@ def get_common_cli_parser(parser: argparse.ArgumentParser = None):
                                    'the original input of the model.')
     common_group.add_argument('--reverse_input_channels',
                               help='Switch the input channels order from RGB to BGR (or vice versa). Applied to '
-                                   'original inputs of the model if and only if a number of channels equals 3. Applied '
-                                   'after application of --mean_values and --scale_values options, so numbers in '
-                                   '--mean_values and --scale_values go in the order of channels used in the original '
-                                   'model.',
+                                   'original inputs of the model if and only if a number of channels equals 3. '
+                                   'When --mean_values/--scale_values are also specified, reversing of channels will '
+                                   'be applied to user\'s input data first, so that numbers in --mean_values '
+                                   'and --scale_values go in the order of channels used in the original model. '
+                                   'In other words, if both options are specified, then the data flow in the model '
+                                   'looks as following: Parameter -> ReverseInputChannels -> Mean/Scale apply -> the original body of the model.',
                               action='store_true')
     common_group.add_argument('--log_level',
                               help='Logger level',
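As a usage illustration (hypothetical model file and values; assuming the `mo` entry point of the openvino-dev package), the combination described by the updated help text would look like:

    mo --input_model model.onnx \
       --reverse_input_channels \
       --mean_values [120,130,140] \
       --scale_values [58,57,57]

With this PR, the mean/scale values follow the model's documented RGB channel order even though they are applied to the user's BGR input, because the reversal now runs first.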
64 changes: 50 additions & 14 deletions tools/mo/unit_tests/mo/back/moc_preprocessing_test_actual.py
@@ -56,25 +56,23 @@ class TestPreprocessingMOC(unittest.TestCase):
     def setUp(self):
         pass

-    def check_scale_constant(self, node, expected, shape=None):
-        const_node = node.input(1).get_source_output().get_node()
+    def check_constant(self, const_node, expected, shape=None):
         self.assertEqual(const_node.get_type_name(), 'Constant')
-        if node.get_type_name() == 'Divide':
-            self.assertTrue(np.allclose(const_node.get_vector(), expected))
-        else:
-            self.assertTrue(np.allclose(const_node.get_vector(), 1. / expected))
-        if shape:
+        self.assertTrue(np.allclose(const_node.get_vector(), expected))
+        if shape is not None:
             assert const_node.shape == PartialShape(shape)

+    def check_scale_constant(self, node, expected, shape=None):
+        const_node = node.input(1).get_source_output().get_node()
+        if node.get_type_name() != 'Divide':
+            expected = 1. / expected
+        self.check_constant(const_node, expected, shape)
+
     def check_mean_constant(self, node, expected, shape=None):
         const_node = node.input(1).get_source_output().get_node()
-        self.assertEqual(const_node.get_type_name(), 'Constant')
-        if node.get_type_name() == 'Subtract':
-            self.assertTrue(np.allclose(const_node.get_vector(), expected))
-        else:
-            self.assertTrue(np.allclose(const_node.get_vector(), -expected.toList()))
-        if shape:
-            self.assertEqual(const_node.shape, PartialShape(shape))
+        if node.get_type_name() != 'Subtract':
+            expected = -expected.toList()
+        self.check_constant(const_node, expected, shape)

     def test_scale_single_value(self):
         argv = Namespace(mean_scale_values=None, scale=2.0)
@@ -615,3 +613,41 @@ def test_no_reverse_channels_even_with_layout(self):
         self.assertTrue(op_node0.get_type_name() == 'Relu')
         op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
         self.assertTrue(op_node1.get_type_name() == 'Relu')
+
+    def test_reverse_channels_and_mean_scale(self):
+        argv = Namespace(reverse_input_channels=True, mean_scale_values={
+            'input2a': {
+                'mean': np.array([1., 2., 3.]),
+                'scale': np.array([2., 4., 8.])}},
+            scale=None)
+        function = create_function2(shape2=[1, 3, 224, 224])
+        process_function(ov_function=function, argv=argv)
+
+        # Verify the order: first the Gather (channel reversal), then 'mean' subtraction, then 'scale'
+        gather = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
+        self.assertTrue(gather.get_type_name() == 'Gather')
+        range_node = gather.input(1).get_source_output().get_node()
+        self.assertTrue(range_node.get_type_name() == 'Range')
+        start = range_node.input(0).get_source_output().get_node()
+        end = range_node.input(1).get_source_output().get_node()
+        step = range_node.input(2).get_source_output().get_node()
+        self.check_constant(start, expected=[2], shape=[])
+        self.check_constant(end, expected=[-1], shape=[])
+        self.check_constant(step, expected=[-1], shape=[])
+        axes = gather.input(2).get_source_output().get_node()
+        self.check_constant(axes, expected=[1], shape=[1])
+
+        op_node = list(gather.output(0).get_target_inputs())[0].get_node()
+        self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')
+        self.check_mean_constant(op_node, expected=[1., 2., 3.], shape=[1, 3, 1, 1])
+
+        op_node = list(op_node.output(0).get_target_inputs())[0].get_node()
+        self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')
+        self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])
+
+        # Verify that input1 is not affected
+        op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
+        self.assertEqual(op_node.get_type_name(), 'Relu')
+
+        # Verify that the guessed layout (?C??) did not appear on input2
+        self.assertEqual(function.get_parameters()[1].layout, Layout())
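In plain numpy terms (a sketch, not the runtime implementation), the pattern the new test asserts — a Gather whose indices come from Range(start=2, stop=-1, step=-1) applied on axis 1 — is simply a channel flip of an NCHW tensor:

    import numpy as np

    data = np.arange(12, dtype=np.float32).reshape(1, 3, 2, 2)  # illustrative NCHW tensor
    indices = np.arange(2, -1, -1)             # Range(2, -1, -1) -> [2, 1, 0]
    flipped = np.take(data, indices, axis=1)   # Gather along the channel axis
    assert np.array_equal(flipped, data[:, ::-1, ...])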