diff --git a/model-optimizer/mo/middle/passes/fusing/fuse_linear_ops.py b/model-optimizer/mo/middle/passes/fusing/fuse_linear_ops.py
index 0c021b19e2fb91..1c011556c01e21 100644
--- a/model-optimizer/mo/middle/passes/fusing/fuse_linear_ops.py
+++ b/model-optimizer/mo/middle/passes/fusing/fuse_linear_ops.py
@@ -111,6 +111,17 @@ def _fuse_mul(graph: Graph, node: Node, fuse_nodes: list, backward: bool = True)
         weights_port.get_connection().set_source(w_mul.out_port(0))
         w_const.connect(w_mul.in_port(tensor_port.idx))
 
+        fuse_node_in_data = fuse_node.in_node(weights_port.idx)
+        w_const_out_data = w_const.node.out_node(w_const.idx)
+
+        # During this reconnection the new data node name is copied from the data node
+        # outgoing from the w_const port. Duplicate data node names lead to duplicate
+        # op node names after constant folding, so we manually set a unique name for
+        # the new data node.
+        if fuse_node_in_data.soft_get('name') == w_const_out_data.soft_get('name') and \
+                fuse_node_in_data.soft_get('name', None) is not None:
+            fuse_node.in_node(weights_port.idx)['name'] = graph.unique_id(mul_name)
+
         # If we fuse in backward direction we should multiply biases if they exists
         if backward and len(fuse_node.in_ports()) == 3 and not fuse_node.in_port(2).disconnected() and \
                 not fuse_node.has_and_set('shape_input'):
diff --git a/model-optimizer/unit_tests/mo/middle/passes/fusing/fuse_linear_ops_test.py b/model-optimizer/unit_tests/mo/middle/passes/fusing/fuse_linear_ops_test.py
index 6626d61dba0690..76c1b67f8f7f38 100644
--- a/model-optimizer/unit_tests/mo/middle/passes/fusing/fuse_linear_ops_test.py
+++ b/model-optimizer/unit_tests/mo/middle/passes/fusing/fuse_linear_ops_test.py
@@ -817,6 +817,55 @@ def test_fuse_mul_to_deconv_1(self):
         (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', 'placeholder_1')
         self.assertTrue(flag, resp)
 
+    def test_fuse_mul_data_nodes_names(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('const_mul_1_w', 'mul_1_w'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('const_conv_1_w', 'conv_1_w'),
+                             ('const_conv_1_b', 'conv_1_b'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'op_output')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]),
+                                                'value': np.ones((11, 11, 3, 96))},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {}
+                             })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False)
+
+        conv_node = Node(graph, 'conv_1')
+        conv_in_data_name = conv_node.in_node(1)['name']
+        const_node = Node(graph, 'const_conv_1_w')
+        const_out_data_name = const_node.out_node(0)['name']
+        mul_node = Node(graph, 'mul_1')
+        conv_in_data = conv_node.in_node(1)
+
+        # Check that the transformation doesn't produce identical data node names,
+        # since identical names may lead to Const ops with identical names.
+        self.assertFalse(conv_in_data_name == const_out_data_name)
+
+        # Attributes required for fusing are kept on data nodes.
+        # These checks ensure that _fuse_mul doesn't remove any of them.
+        self.assertTrue(conv_in_data['output_channel_dim'] == 3)
+        self.assertTrue(conv_in_data['input_channel_dim'] == 2)
+        self.assertTrue(conv_in_data['dims_number'] == 4)
+        self.assertTrue(mul_node['can_be_fused'] is True)
+
 
 # Unit tests for fuse_linear_ops
 class FuseLinOpsTests(unittest.TestCase):
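For reference, the sketch below illustrates the renaming logic that the fix in _fuse_mul relies on. It is a minimal, self-contained example and does not use the Model Optimizer API: unique_id() is a simplified stand-in for graph.unique_id(), and the node names are hypothetical.

    def unique_id(taken_names, prefix):
        """Return a name based on 'prefix' that does not clash with existing node names."""
        name, idx = prefix, 0
        while name in taken_names:
            name = '{}_{}'.format(prefix, idx)
            idx += 1
        taken_names.add(name)
        return name

    existing_names = {'conv_1_w', 'mul_1'}

    # Without the fix: the reconnected data node inherits the name of the data node
    # produced by the weights Const, so the graph ends up with two nodes named 'conv_1_w'.
    reconnected_data_name = 'conv_1_w'
    assert reconnected_data_name in existing_names

    # With the fix: a fresh name is derived from the Mul node name instead.
    reconnected_data_name = unique_id(existing_names, 'mul_1')
    assert reconnected_data_name == 'mul_1_0'

After constant folding, duplicate data node names would otherwise turn into duplicate Const op names, which is what the new unit test checks for.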