From 241cab31efdc754269cd940ce392b49f8fea8cb3 Mon Sep 17 00:00:00 2001
From: deepak
Date: Wed, 6 May 2020 23:40:01 +0530
Subject: [PATCH 01/55] Implemented functionInvocation Unit Test for
 StatefulPartitionedCall operator (working) and initial changes for
 placeholder (not working as of now)

---
 python/tvm/relay/frontend/tensorflow.py      | 67 +++++++++++++
 .../frontend/tensorflow/test_forward.py      | 99 +++++++++++++++++++
 2 files changed, 166 insertions(+)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 120631ea31dc..afbf5c243982 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -1930,6 +1930,28 @@ def _impl(inputs, attr, params, mod):
         return _res
     return _impl
 
+def _partitioned_call():
+    def _impl(inputs, attr, params):
+        if not isinstance(inputs, tuple):
+            inputs = list(inputs)
+        assert len(inputs) > 0, "partitioned_call takes >= 1 inputs, but 0 given."
+        _res = inputs[0]
+        for each in inputs[1:]:
+            _res = _op.add(_res, each)
+        return _res
+    return _impl
+
+def _stateful_partitioned_call():
+    def _impl(inputs, attr, params):
+        if not isinstance(inputs, tuple):
+            inputs = list(inputs)
+        assert len(inputs) > 0, "stateful_partitioned_call takes >= 1 inputs, but 0 given."
+        _res = inputs[0]
+        for each in inputs[1:]:
+            _res = _op.add(_res, each)
+        return _res
+    return _impl
+
 # compatible operators that do NOT require any conversion.
 _identity_list = []
 
@@ -2028,6 +2050,8 @@ def _impl(inputs, attr, params, mod):
     'NotEqual'                          : _broadcast('not_equal'),
     'OneHot'                            : _one_hot(),
     'Pack'                              : _pack(),
+    'PartitionedCall'                   : _partitioned_call(),
+    'StatefulPartitionedCall'           : _stateful_partitioned_call(),
     'Pad'                               : _pad('Pad'),
     'PadV2'                             : _pad('PadV2'),
     'Pow'                               : _elemwise('power'),
@@ -2713,6 +2737,7 @@ def __init__(self):
         self._loop_var_order = {}
         self._hash2tfnode = {}
         self._while_loop_name_set = set()
+        self._subgraphs = {}
 
     def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
         """Construct relay nodes from tensorflow graph definition - GraphDef.
@@ -2824,6 +2849,27 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
                     self._while_loop_name_set.add(node_name_prefix)
                 control_flow_nodes.append(node)
 
+            if graph.library.function:
+                f1 = graph.library.function[0]
+                if f1.signature.name not in self._subgraphs:
+                    from tensorflow.python.framework import function_def_to_graph
+                    sub = function_def_to_graph.function_def_to_graph_def(f1)
+                    self._subgraphs.update({f1.signature.name: 'started adding'})
+                    self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0])})
+
+            # if graph.library.function and not self.libFuncs:
+            #     for func in graph.library.function:
+            #         self.libFuncs.append(func.signature.name)
+            #
+            # if graph.library.function and not self.libFuncsDict:
+            #     for func in graph.library.function:
+            #         if func.signature.name not in self._subgraphs:
+            #             from tensorflow.python.framework import function_def_to_graph
+            #             sub = function_def_to_graph.function_def_to_graph_def(func)
+            #             self.libFuncsDict[func.signature.name] = sub[0]
+            #     for func in self.libFuncsDict:
+            #         self._subgraphs.update({func: 'started adding'})
+            #         self._subgraphs.update({func: self.from_tensorflow(self.libFuncsDict[func])})
 
         # First, parse all control flow nodes.
         # Convert tf.cond to Branch and tf.while_loop to Loop.
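Note on the conversion step in the hunk above: the new block turns the first library
FunctionDef into a GraphDef and recursively re-enters from_tensorflow() on it. A
minimal standalone sketch of that mechanism, assuming TF 1.x graph mode (add_fn and
AddFn are illustrative names, not part of the patch):

    import tensorflow as tf
    from tensorflow.python.framework import function, function_def_to_graph

    @function.Defun(tf.float32, tf.float32, func_name="AddFn")
    def add_fn(x, y):
        return tf.add(x, y)

    with tf.Graph().as_default() as g:
        # Calling the Defun registers "AddFn" in the graph's function library.
        out = add_fn(tf.constant(1.0), tf.constant(2.0))
        fdef = g.as_graph_def().library.function[0]
        # Returns a (GraphDef, nested-to-flat tensor-name map) tuple, which is
        # why the patch indexes the result as sub[0].
        sub = function_def_to_graph.function_def_to_graph_def(fdef)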
sorted_cf_nodes = [] @@ -3241,6 +3287,27 @@ def _backtrack_construct(self, node_name): in_op = in_op[0] inputs.append(in_op) + if node.op in ["PartitionedCall", "StatefulPartitionedCall"]: + f1 = self._subgraphs[attr["f"].name][0]["main"] + # add_one = tvm.relay.GlobalVar("add_one") + # self._mod[add_one] = self._subgraphs[attr["f"].name][0]["main"] + wl = tvm.relay.var('partitioned_call') + sb = tvm.relay.scope_builder.ScopeBuilder() + sb.let(wl, f1) + + sb.ret(wl(*inputs)) + op = sb.get() + # elif node.op in self.libFuncs: + # f1 = self._subgraphs[attr["f"].name][0]["main"] + # # add_one = tvm.relay.GlobalVar("add_one") + # # self._mod[add_one] = self._subgraphs[attr["f"].name][0]["main"] + # wl = tvm.relay.var('partitioned_call') + # sb = tvm.relay.scope_builder.ScopeBuilder() + # sb.let(wl, f1) + # + # sb.ret(wl(*inputs)) + # op = sb.get() + else: op = self._convert_operator(node.op, inputs, attr, self._graph) if isinstance(op, np.ndarray): diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 858ef6b5c40e..7d56469ccbb6 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -36,6 +36,10 @@ from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops import init_ops +from tensorflow.python.framework import function +from tensorflow.python.framework import ops +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import gen_functional_ops from distutils.version import LooseVersion import tvm from tvm import te @@ -3202,11 +3206,106 @@ def test_forward_isinf(): def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") +def test_spop_function_invocation(): + tf.reset_default_graph() + with tf.Graph().as_default(): + + def fun1(a): + return tf.multiply(a,a) + + def fun2(b): + return tf.multiply(b,10) + + @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") + def fun3(x,y): + x = fun2(x) + y = fun1(y) + z = tf.add(x,y) + return z + + op = gen_functional_ops.StatefulPartitionedCall(args=[tf.constant(10.5),tf.constant(20.4)], + Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") + compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) + +def test_spop_placeholder_default(): + with tf.Graph().as_default(): + pl1 = tf.placeholder_with_default(20, tf.int32, name="pl1") + pl2 = tf.placeholder_with_default(10, tf.int32, name="pl2") + data = tf.constant(30) + data2 = tf.constant(40) + + @function.Defun(tf.int32, tf.int32) + def Forward(x, y): + # def Forward(x, y) ->[tf.int32]: + # Do not create placeholders in Defun methods..placeholders should be created outside of Defun()..and can be passed inside it + print(x.name) + print(y.name) + return tf.add(x, y) + + z = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward) + + feed = {'pl1:0': data, 'pl2:0': data2} + compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], + 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + +def test_spop_placeholder_dimension_error(): + tf.reset_default_graph() + with tf.Graph().as_default(): + pl1 = tf.placeholder(tf.int32, name="pl1") + pl2 = tf.placeholder(tf.int32, name="pl2") + data = np.array([[-1, 1], [2, -2]], dtype=np.int32) + data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32) + + @function.Defun(*[tf.int32] * 2) + def Forward(x, y): + # def Forward(x, y) ->[tf.int32]: + # Do not 
create placeholders in Defun methods..placeholders should be created outside of Defun()..and can be passed inside it + print(x.name) + print(y.name) + return tf.add(x, y) + + z = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward) + + feed = {'pl1:0': data, 'pl2:0': data2} + compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], + 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + +def tvm_frontend_placeholder_spop_error(): + tf.reset_default_graph() + with tf.Graph().as_default(): + in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) + var1 = tf.Variable(in_data1, name='in1') + place1 = array_ops.placeholder_with_default(var1, shape=in_data1.shape, name='Place1') + + in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) + place2 = array_ops.placeholder( + shape=in_data1.shape, dtype=in_data1.dtype, name='Place2') + + @function.Defun(*[dtypes.float32] * 2) + def Body1(x, y): + out1 = tf.math.add(var1, x, name='out1') + out2 = tf.math.add(out1, y, name='out2') + return out2 + + op = gen_functional_ops.StatefulPartitionedCall(args=[place1, place2], + Tout=[dtypes.float32], f=Body1) + + compare_tf_with_tvm([in_data1, in_data2], ['Place1:0', 'Place2:0'], 'out2:0', mode='vm', init_global_variables=True) + +def test_spop(): + test_spop_function_invocation() + tvm_frontend_placeholder_spop_error() + test_spop_placeholder_dimension_error() + test_spop_placeholder_default() + + ####################################################################### # Main # ---- if __name__ == '__main__': + # StatefulPartitionedOp + test_spop() # Transforms test_forward_slice() test_forward_transpose() From 6fe8c1bc35a3fe5003ba2a2fbea8ca3acabc679b Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 8 May 2020 00:10:10 +0530 Subject: [PATCH 02/55] Placeholder exercises with tvm --- .../frontend/tensorflow/test_forward.py | 64 ++++++++++++++----- 1 file changed, 48 insertions(+), 16 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7d56469ccbb6..cad09285f9b9 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3228,26 +3228,59 @@ def fun3(x,y): compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) def test_spop_placeholder_default(): + with tf.Graph().as_default(): - pl1 = tf.placeholder_with_default(20, tf.int32, name="pl1") - pl2 = tf.placeholder_with_default(10, tf.int32, name="pl2") - data = tf.constant(30) - data2 = tf.constant(40) + data = np.ones([1], dtype=int).astype(np.int32) + dataVar = tf.Variable(data, shape=data.shape) + pl1 = array_ops.placeholder_with_default(dataVar,shape=data.shape,name="pl1") + tpl = tf.convert_to_tensor(pl1, dtype=tf.int32) - @function.Defun(tf.int32, tf.int32) - def Forward(x, y): - # def Forward(x, y) ->[tf.int32]: - # Do not create placeholders in Defun methods..placeholders should be created outside of Defun()..and can be passed inside it - print(x.name) - print(y.name) - return tf.add(x, y) + @function.Defun(*[tf.int32]) + def pl_with_default(pl): + # tpl = tf.convert_to_tensor(pl, dtype=tf.int32) + return tf.expand_dims(tf.multiply(pl, pl), 0) - z = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward) + # fn = pl_with_default(pl1) + # + # with tf.Session() as sess: + # sess.run(tf.global_variables_initializer()) + # print("hello ji..the output is as follows: ") + # 
sess.run(fn) + # print(sess.run(fn)) - feed = {'pl1:0': data, 'pl2:0': data2} - compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], + + z = gen_functional_ops.StatefulPartitionedCall(args=[tpl], Tout=[tf.int32], f=pl_with_default) + compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + # in_a = tf.placeholder(dtype=tf.int32, shape=(1, 2), name="in_a") + # @function.Defun(*[tf.int32]) + # def forward(x): + # W = tf.Variable([1,2]) + # b = tf.Variable([1,3]) + # # b = tf.get_variable("b", initializer=tf.zeros(shape=(2))) + # return W * x + b + # + # # out_a = forward(in_a) + # data = [1,0] + # z = gen_functional_ops.StatefulPartitionedCall(args=[in_a], Tout=[tf.int32], f=forward) + # compare_tf_with_tvm(data, 'in_a:0', + # 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) +# ******************************************************************* + # a = tf.constant(5, tf.int32, name='A') + # pl = tf.placeholder(tf.int32, name='Pl1', shape=(1)) + # + # @function.Defun(tf.int32, tf.int32) + # def Forward(x, y): + # return tf.add(x, y) + # # return tf.expand_dims(tf.convert_to_tensor(tf.add(x, y, name="Forward"), dtype=tf.int32),0) + # + # z = gen_functional_ops.StatefulPartitionedCall(args=[a, pl], Tout=[tf.int32], f=Forward) + # input_shape = [1] + # x = np.arange(1, dtype=np.int32).reshape(input_shape) + # compare_tf_with_tvm([x], ['Pl1:0'], + # 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + def test_spop_placeholder_dimension_error(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3294,10 +3327,9 @@ def Body1(x, y): def test_spop(): test_spop_function_invocation() + test_spop_placeholder_default() tvm_frontend_placeholder_spop_error() test_spop_placeholder_dimension_error() - test_spop_placeholder_default() - ####################################################################### From aeb80730d51326a8e3df352726d5c429403880c7 Mon Sep 17 00:00:00 2001 From: deepak Date: Mon, 11 May 2020 12:46:20 +0530 Subject: [PATCH 03/55] placeholder interim --- 3rdparty/dmlc-core | 2 +- tests/python/frontend/tensorflow/test_forward.py | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/3rdparty/dmlc-core b/3rdparty/dmlc-core index ff3db4367a30..808f485387f9 160000 --- a/3rdparty/dmlc-core +++ b/3rdparty/dmlc-core @@ -1 +1 @@ -Subproject commit ff3db4367a30f542aafb83b4af45e685b80102d0 +Subproject commit 808f485387f9a03f78fa9f1159f387d0d91b7a28 diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index cad09285f9b9..e6c89b8667e3 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3239,6 +3239,7 @@ def test_spop_placeholder_default(): def pl_with_default(pl): # tpl = tf.convert_to_tensor(pl, dtype=tf.int32) return tf.expand_dims(tf.multiply(pl, pl), 0) + # return tf.expand_dims(tf.multiply(pl, pl), 0) # fn = pl_with_default(pl1) # @@ -3303,7 +3304,7 @@ def Forward(x, y): compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def tvm_frontend_placeholder_spop_error(): +def tvm_frontend_placeholder_invalid_input_index_error(): tf.reset_default_graph() with tf.Graph().as_default(): in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) @@ -3327,9 +3328,10 @@ def Body1(x, y): def test_spop(): test_spop_function_invocation() - test_spop_placeholder_default() - 
tvm_frontend_placeholder_spop_error() + tvm_frontend_placeholder_invalid_input_index_error() test_spop_placeholder_dimension_error() + test_spop_placeholder_default() + ####################################################################### From 011985748acc206bf24c1d119bb2b631039bb0e6 Mon Sep 17 00:00:00 2001 From: deepak Date: Mon, 11 May 2020 13:35:01 +0530 Subject: [PATCH 04/55] SPOP Test cases structure --- .../frontend/tensorflow/test_forward.py | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index e6c89b8667e3..23a6c4a499bf 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3326,12 +3326,30 @@ def Body1(x, y): compare_tf_with_tvm([in_data1, in_data2], ['Place1:0', 'Place2:0'], 'out2:0', mode='vm', init_global_variables=True) -def test_spop(): - test_spop_function_invocation() +def test_spop_arithmetic(): + pass + +def test_spop_control_flow(): + pass + +def test_spop_variables(): + pass + +def test_spop_constants(): + pass + +def test_spop_placeholder(): tvm_frontend_placeholder_invalid_input_index_error() test_spop_placeholder_dimension_error() test_spop_placeholder_default() +def test_spop(): + test_spop_function_invocation() + test_spop_arithmetic() + test_spop_control_flow() + test_spop_variables() + test_spop_constants() + test_spop_placeholder() ####################################################################### From 077ca4d8ea175aac554468b27f342fd2f3a186a0 Mon Sep 17 00:00:00 2001 From: deepak Date: Tue, 12 May 2020 09:16:41 +0530 Subject: [PATCH 05/55] New test cases for spop --- python/tvm/relay/frontend/tensorflow.py | 2 +- .../frontend/tensorflow/test_forward.py | 82 +++++++++++++++++-- 2 files changed, 76 insertions(+), 8 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index afbf5c243982..1e8300c25f9b 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -3262,6 +3262,7 @@ def _backtrack_construct(self, node_name): Converted relay expression """ node_name = node_name.split(':')[0].split("^")[-1] + inputs = [] if node_name not in self._nodes: node = self._tf_node_map[node_name] @@ -3276,7 +3277,6 @@ def _backtrack_construct(self, node_name): attr["_output_shapes"] = self._output_shapes[node_name] attr["_node_name"] = node.name attr["_target_layout"] = self._layout - inputs = [] for iname in node.input: in_op = self._backtrack_construct(iname) if isinstance(in_op, _expr.TupleWrapper): diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 23a6c4a499bf..e3f95bdb9854 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3304,7 +3304,7 @@ def Forward(x, y): compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def tvm_frontend_placeholder_invalid_input_index_error(): +def test_frontend_placeholder_invalid_input_index_error(): tf.reset_default_graph() with tf.Graph().as_default(): in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) @@ -3326,20 +3326,87 @@ def Body1(x, y): compare_tf_with_tvm([in_data1, in_data2], ['Place1:0', 'Place2:0'], 'out2:0', mode='vm', init_global_variables=True) +def test_spop_placeholder_one(): + print("Inside 
placeholder function") + tf.reset_default_graph() + g = tf.Graph() + with g.as_default(): + + @function.Defun(*[tf.int32]*2) + def Forward(x,y): + #Do not create placeholders in Defun methods..placeholders should be created outside of Defun()..and can be passed inside it + print(x.name) + print(y.name) + b = tf.add(x, y) + return b + pl1 = tf.placeholder(tf.int32,name="pl1") + pl2 = tf.placeholder(tf.int32,name="pl2") + data = np.array([[-1, 1], [2, -2]], dtype=np.int32) + data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32) + z = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward) + + feed = {"pl1:0": data,"pl2:0": data2} + + compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], 'StatefulPartitionedCall:0', mode='vm', + init_global_variables=True) + def test_spop_arithmetic(): - pass + tf.reset_default_graph() + with tf.Graph().as_default(): + @function.Defun(*[dtypes.int32]*3) + def arithmetic(m,x,c): + z = tf.add(tf.multiply(m, x), c) + return z + + m = tf.constant(10) + x = tf.constant(20) + c = tf.constant(2) + spopFn = gen_functional_ops.StatefulPartitionedCall(args=[m,x,c],Tout=[tf.int32], f=arithmetic) + + compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def test_spop_control_flow(): - pass + tf.reset_default_graph() + with tf.Graph().as_default(): + # WSTART + @function.Defun(*[dtypes.float32] * 2) + def Body1(x, y): + with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"): + # z = Body2 + z = math_ops.multiply(x, y) + i = 0 + while i<10 : + i +=1 + if i == 5: + continue + z = math_ops.multiply(x, y*i) + return z + + op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), + constant_op.constant(100.)], + Tout=[dtypes.float32], f=Body1) + compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def test_spop_variables(): pass def test_spop_constants(): - pass + tf.reset_default_graph() + with tf.Graph().as_default(): + @function.Defun(*[dtypes.int32] * 2) + def constantsFn(x, y): + z = tf.add(x,y) + return z + + a = tf.constant(20, name = "a") + b = tf.constant(40, name = "b") + spopFn = gen_functional_ops.StatefulPartitionedCall(args=[a, b], Tout=[tf.int32], f=constantsFn) + + compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def test_spop_placeholder(): - tvm_frontend_placeholder_invalid_input_index_error() + test_spop_placeholder_one() + test_frontend_placeholder_invalid_input_index_error() test_spop_placeholder_dimension_error() test_spop_placeholder_default() @@ -3349,7 +3416,8 @@ def test_spop(): test_spop_control_flow() test_spop_variables() test_spop_constants() - test_spop_placeholder() + test_spop_placeholder_one() + # test_spop_placeholder() ####################################################################### @@ -3357,7 +3425,7 @@ def test_spop(): # ---- if __name__ == '__main__': # StatefulPartitionedOp - test_spop() + # test_spop() # Transforms test_forward_slice() test_forward_transpose() From bdff27ca198ae9de6e351e1c8030e414cae0a559 Mon Sep 17 00:00:00 2001 From: deepak Date: Tue, 12 May 2020 12:41:02 +0530 Subject: [PATCH 06/55] miscellaneous test cases for spop --- python/tvm/relay/frontend/tensorflow.py | 1 + .../frontend/tensorflow/test_forward.py | 34 ++++++++++++++++--- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 1e8300c25f9b..8cfe431f8a61 100644 --- 
a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -3297,6 +3297,7 @@ def _backtrack_construct(self, node_name): sb.ret(wl(*inputs)) op = sb.get() + print(op) # elif node.op in self.libFuncs: # f1 = self._subgraphs[attr["f"].name][0]["main"] # # add_one = tvm.relay.GlobalVar("add_one") diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index e3f95bdb9854..39a2a6fc0c4d 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3346,7 +3346,6 @@ def Forward(x,y): z = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward) feed = {"pl1:0": data,"pl2:0": data2} - compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) @@ -3382,13 +3381,38 @@ def Body1(x, y): z = math_ops.multiply(x, y*i) return z - op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), - constant_op.constant(100.)], - Tout=[dtypes.float32], f=Body1) + op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), constant_op.constant(100.)], Tout=[dtypes.float32], f=Body1) + # @function.Defun() + # def test_vanilla_loop(): + # i = tf.constant(0) + # + # def c(i): return tf.less(i, 10) + # + # def b(i): return tf.add(i, 1) + # r = tf.while_loop(c, b, [i]) + # return r + # z = gen_functional_ops.StatefulPartitionedCall(args=[], Tout=[tf.int32], f=test_vanilla_loop) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def test_spop_variables(): - pass + tf.reset_default_graph() + with tf.Graph().as_default(): + data = np.random.uniform(size=(32, 100)).astype('float32') + + @function.Defun() + def variableFn(): + input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype) + input_tensor = array_ops.reshape(input_op, data.shape) + + size = input_tensor.shape.dims[1] + with variable_scope.variable_scope("linear", reuse=None): + w = variable_scope.get_variable( + "w", shape=[size, size], dtype=input_tensor.dtype) + ret = math_ops.matmul(input_tensor, w) + return ret + + z = gen_functional_ops.StatefulPartitionedCall(args=[], Tout=[dtypes.float32], f=variableFn) + compare_tf_with_tvm(data, 'Placeholder:0', 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") def test_spop_constants(): tf.reset_default_graph() From 8ec980c1dc31ab246a8329ce839f73306d063668 Mon Sep 17 00:00:00 2001 From: deepak Date: Wed, 13 May 2020 01:40:03 +0530 Subject: [PATCH 07/55] Placeholder samples..working with shapes explicitly passed --- python/tvm/relay/frontend/tensorflow.py | 9 ++- .../frontend/tensorflow/test_forward.py | 56 ++++++++++++++----- 2 files changed, 49 insertions(+), 16 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 8a1d74882aa6..02a86889c0be 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2861,8 +2861,15 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): if f1.signature.name not in self._subgraphs: from tensorflow.python.framework import function_def_to_graph sub = function_def_to_graph.function_def_to_graph_def(f1) + print(sub) + i = 0 + newshape = {} + for key,value in self._in_shape.items(): + newshape[sub[0].node[i].name] = value + i+=1 self._subgraphs.update({f1.signature.name: 'started adding'}) - 
self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0])}) + self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0], shape=newshape)}) + # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0])}) # if graph.library.function and not self.libFuncs: # for func in graph.library.function: diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 9f2a11cfebf8..4668b42a6b97 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -181,7 +181,7 @@ def name_without_num(name): sess.run(variables.global_variables_initializer()) final_graph_def = tf_testing.AddShapesToGraphDef(sess, out_node) tf_output = run_tf_graph(sess, in_data, in_name, out_name) - + file_writer = tf.summary.FileWriter("/home/deepak/tfgraphsoutput/", sess.graph) for device in ["llvm", "cuda"]: ctx = tvm.context(device, 0) if not ctx.exist: @@ -3287,12 +3287,24 @@ def Body1(x, y): compare_tf_with_tvm([in_data1, in_data2], ['Place1:0', 'Place2:0'], 'out2:0', mode='vm', init_global_variables=True) +def test_spop_placeholder_three(): + tf.disable_eager_execution() + t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") + t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") + t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + @tf.function(input_signature=[tf.TensorSpec(shape=(3,3,3), dtype=tf.int32), tf.TensorSpec(shape=(3,3,3), dtype=tf.int32)]) + def add(x, y): + return tf.add(x, y, "add_t1_t2") + t3 = add(t1, t2) + compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) + def test_spop_placeholder_one(): print("Inside placeholder function") tf.reset_default_graph() g = tf.Graph() with g.as_default(): - + # @function.Defun(tf.TensorSpec(shape=(3,3,3), dtype=tf.int32), tf.TensorSpec(shape=(3,3,3), dtype=tf.int32)) @function.Defun(*[tf.int32]*2) def Forward(x,y): #Do not create placeholders in Defun methods..placeholders should be created outside of Defun()..and can be passed inside it @@ -3302,12 +3314,16 @@ def Forward(x,y): return b pl1 = tf.placeholder(tf.int32,name="pl1") pl2 = tf.placeholder(tf.int32,name="pl2") + pl3 = tf.placeholder(tf.int32, name="pl3") data = np.array([[-1, 1], [2, -2]], dtype=np.int32) data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32) - z = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward) + data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32) + z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward) + z2 = z1 + pl3 feed = {"pl1:0": data,"pl2:0": data2} - compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], 'StatefulPartitionedCall:0', mode='vm', + compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], ['StatefulPartitionedCall:0',z2.name], + mode='vm', init_global_variables=True) def test_spop_arithmetic(): @@ -3325,7 +3341,7 @@ def arithmetic(m,x,c): compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_control_flow(): +def test_spop_control_flow_one(): tf.reset_default_graph() with tf.Graph().as_default(): # WSTART @@ -3343,18 +3359,28 @@ def Body1(x, y): return z op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), constant_op.constant(100.)], Tout=[dtypes.float32], f=Body1) - # @function.Defun() - # def test_vanilla_loop(): - # i = tf.constant(0) - # - # 
def c(i): return tf.less(i, 10) - # - # def b(i): return tf.add(i, 1) - # r = tf.while_loop(c, b, [i]) - # return r - # z = gen_functional_ops.StatefulPartitionedCall(args=[], Tout=[tf.int32], f=test_vanilla_loop) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) +def test_spop_control_flow_two(): + tf.reset_default_graph() + with tf.Graph().as_default(): + @function.Defun() + def vanilla_loop(): + i = tf.constant(0) + + def c(i): return tf.less(i, 10) + + def b(i): return tf.add(i, 1) + r = tf.while_loop(c, b, [i]) + return r + z = gen_functional_ops.StatefulPartitionedCall(args=[], Tout=[tf.int32], f=vanilla_loop) + compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + +def test_spop_control_flow(): + test_spop_control_flow_one() + # test_spop_control_flow_two() + test_spop_placeholder_three() + def test_spop_variables(): tf.reset_default_graph() with tf.Graph().as_default(): From 8c4a48f5d87788fb9db86f9b3946b6a2e278f6d6 Mon Sep 17 00:00:00 2001 From: deepak Date: Wed, 13 May 2020 09:47:29 +0530 Subject: [PATCH 08/55] Variables test case. Works with the same fix of shape_dict --- .../frontend/tensorflow/test_forward.py | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 4668b42a6b97..c9f246e7b78d 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3382,6 +3382,23 @@ def test_spop_control_flow(): test_spop_placeholder_three() def test_spop_variables(): + tf.reset_default_graph() + g = tf.Graph() + with g.as_default(): + + @function.Defun(tf.int32,tf.int32) + def Forward(x,y): + #create variables outside Defun() method, you can pass variables inside Defun method though + return tf.multiply(x,y) + const1 = tf.constant(10) + const2 = tf.constant(20) + var1 = tf.Variable(const1, dtype=tf.int32) + var2 = tf.Variable(const2, dtype=tf.int32) + + z = gen_functional_ops.StatefulPartitionedCall(args=[var1,var2],Tout=[tf.int32], f=Forward) + compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") + +def test_spop_variables_one(): tf.reset_default_graph() with tf.Graph().as_default(): data = np.random.uniform(size=(32, 100)).astype('float32') @@ -3393,8 +3410,7 @@ def variableFn(): size = input_tensor.shape.dims[1] with variable_scope.variable_scope("linear", reuse=None): - w = variable_scope.get_variable( - "w", shape=[size, size], dtype=input_tensor.dtype) + w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype) ret = math_ops.matmul(input_tensor, w) return ret From 9aaa6506d713c052553f491ae3540de43f8f0cad Mon Sep 17 00:00:00 2001 From: deepak Date: Wed, 13 May 2020 16:59:01 +0530 Subject: [PATCH 09/55] SPOP Positive test cases first iteration --- python/tvm/relay/frontend/tensorflow.py | 9 +- .../frontend/tensorflow/test_forward.py | 126 +++++------------- 2 files changed, 37 insertions(+), 98 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 02a86889c0be..3758832ba826 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2860,8 +2860,13 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): f1 = graph.library.function[0] if f1.signature.name not in self._subgraphs: from tensorflow.python.framework 
import function_def_to_graph + # sub, nested_to_flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1.attr[ + # "_input_shapes"].list.shape) sub = function_def_to_graph.function_def_to_graph_def(f1) - print(sub) + #FOR MAHESH: Below is new logic which is failing when we can't deduce shapes for input data(tensor) + # subgraph_shape_dict = {f_arg.name: self._in_shape[node_input] for f_arg, node_input in + # zip(f1.signature.input_arg, node.input)} + #FOR MAHESH: The following hack works for placeholders and variables if in_shape has all shapes info i = 0 newshape = {} for key,value in self._in_shape.items(): @@ -2869,7 +2874,7 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): i+=1 self._subgraphs.update({f1.signature.name: 'started adding'}) self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0], shape=newshape)}) - # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0])}) + # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub)}) # if graph.library.function and not self.libFuncs: # for func in graph.library.function: diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index c9f246e7b78d..22f216124a61 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3188,7 +3188,7 @@ def fun3(x,y): Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) -def test_spop_placeholder_default(): +def test_spop_placeholder_two(): with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) @@ -3198,94 +3198,10 @@ def test_spop_placeholder_default(): @function.Defun(*[tf.int32]) def pl_with_default(pl): - # tpl = tf.convert_to_tensor(pl, dtype=tf.int32) return tf.expand_dims(tf.multiply(pl, pl), 0) - # return tf.expand_dims(tf.multiply(pl, pl), 0) - - # fn = pl_with_default(pl1) - # - # with tf.Session() as sess: - # sess.run(tf.global_variables_initializer()) - # print("hello ji..the output is as follows: ") - # sess.run(fn) - # print(sess.run(fn)) - z = gen_functional_ops.StatefulPartitionedCall(args=[tpl], Tout=[tf.int32], f=pl_with_default) - compare_tf_with_tvm(data, ['pl1:0'], - 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) - - # in_a = tf.placeholder(dtype=tf.int32, shape=(1, 2), name="in_a") - # @function.Defun(*[tf.int32]) - # def forward(x): - # W = tf.Variable([1,2]) - # b = tf.Variable([1,3]) - # # b = tf.get_variable("b", initializer=tf.zeros(shape=(2))) - # return W * x + b - # - # # out_a = forward(in_a) - # data = [1,0] - # z = gen_functional_ops.StatefulPartitionedCall(args=[in_a], Tout=[tf.int32], f=forward) - # compare_tf_with_tvm(data, 'in_a:0', - # 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -# ******************************************************************* - # a = tf.constant(5, tf.int32, name='A') - # pl = tf.placeholder(tf.int32, name='Pl1', shape=(1)) - # - # @function.Defun(tf.int32, tf.int32) - # def Forward(x, y): - # return tf.add(x, y) - # # return tf.expand_dims(tf.convert_to_tensor(tf.add(x, y, name="Forward"), dtype=tf.int32),0) - # - # z = gen_functional_ops.StatefulPartitionedCall(args=[a, pl], Tout=[tf.int32], f=Forward) - # input_shape = [1] - # x = np.arange(1, dtype=np.int32).reshape(input_shape) - # compare_tf_with_tvm([x], ['Pl1:0'], - # 'StatefulPartitionedCall:0', mode='vm', 
init_global_variables=True) - -def test_spop_placeholder_dimension_error(): - tf.reset_default_graph() - with tf.Graph().as_default(): - pl1 = tf.placeholder(tf.int32, name="pl1") - pl2 = tf.placeholder(tf.int32, name="pl2") - data = np.array([[-1, 1], [2, -2]], dtype=np.int32) - data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32) - - @function.Defun(*[tf.int32] * 2) - def Forward(x, y): - # def Forward(x, y) ->[tf.int32]: - # Do not create placeholders in Defun methods..placeholders should be created outside of Defun()..and can be passed inside it - print(x.name) - print(y.name) - return tf.add(x, y) - - z = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward) - - feed = {'pl1:0': data, 'pl2:0': data2} - compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], - 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) - -def test_frontend_placeholder_invalid_input_index_error(): - tf.reset_default_graph() - with tf.Graph().as_default(): - in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) - var1 = tf.Variable(in_data1, name='in1') - place1 = array_ops.placeholder_with_default(var1, shape=in_data1.shape, name='Place1') - - in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32) - place2 = array_ops.placeholder( - shape=in_data1.shape, dtype=in_data1.dtype, name='Place2') - - @function.Defun(*[dtypes.float32] * 2) - def Body1(x, y): - out1 = tf.math.add(var1, x, name='out1') - out2 = tf.math.add(out1, y, name='out2') - return out2 - - op = gen_functional_ops.StatefulPartitionedCall(args=[place1, place2], - Tout=[dtypes.float32], f=Body1) - - compare_tf_with_tvm([in_data1, in_data2], ['Place1:0', 'Place2:0'], 'out2:0', mode='vm', init_global_variables=True) + compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def test_spop_placeholder_three(): tf.disable_eager_execution() @@ -3293,7 +3209,24 @@ def test_spop_placeholder_three(): t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) - @tf.function(input_signature=[tf.TensorSpec(shape=(3,3,3), dtype=tf.int32), tf.TensorSpec(shape=(3,3,3), dtype=tf.int32)]) + + # @tf.function(input_signature=[tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32), + # tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32)]) + @tf.function() + def add(x, y): + return tf.add(x, y, "add_t1_t2") + t3 = add(t1, t2) + compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) + +def test_spop_placeholder_four(): + tf.disable_eager_execution() + t1 = tf.placeholder(tf.int32,name="t1") + t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) + t2 = tf.placeholder(tf.int32, name="t2") + t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) + + @tf.function(input_signature=[tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32), + tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32)]) def add(x, y): return tf.add(x, y, "add_t1_t2") t3 = add(t1, t2) @@ -3320,11 +3253,14 @@ def Forward(x,y): data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32) z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward) z2 = z1 + pl3 - - feed = {"pl1:0": data,"pl2:0": data2} - compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], ['StatefulPartitionedCall:0',z2.name], + compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], + 
'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + # compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], + # ['StatefulPartitionedCall:0',z2.name], + # mode='vm', + # init_global_variables=True) def test_spop_arithmetic(): tf.reset_default_graph() @@ -3379,7 +3315,6 @@ def b(i): return tf.add(i, 1) def test_spop_control_flow(): test_spop_control_flow_one() # test_spop_control_flow_two() - test_spop_placeholder_three() def test_spop_variables(): tf.reset_default_graph() @@ -3433,9 +3368,9 @@ def constantsFn(x, y): def test_spop_placeholder(): test_spop_placeholder_one() - test_frontend_placeholder_invalid_input_index_error() - test_spop_placeholder_dimension_error() - test_spop_placeholder_default() + test_spop_placeholder_two() + test_spop_placeholder_three() + # test_spop_placeholder_four() def test_spop(): test_spop_function_invocation() @@ -3443,8 +3378,7 @@ def test_spop(): test_spop_control_flow() test_spop_variables() test_spop_constants() - test_spop_placeholder_one() - # test_spop_placeholder() + test_spop_placeholder() ####################################################################### From a1ee137d08b0a53957fb80895201748e87fd8591 Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 11:52:32 +0530 Subject: [PATCH 10/55] support output tensors as function args, multiple functions --- python/tvm/relay/frontend/tensorflow.py | 51 +++++++++++++++---------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 3758832ba826..a5011e86211b 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2855,26 +2855,37 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): if node.op == "Exit": self._while_loop_name_set.add(node_name_prefix) control_flow_nodes.append(node) - - if graph.library.function: - f1 = graph.library.function[0] - if f1.signature.name not in self._subgraphs: - from tensorflow.python.framework import function_def_to_graph - # sub, nested_to_flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1.attr[ - # "_input_shapes"].list.shape) - sub = function_def_to_graph.function_def_to_graph_def(f1) - #FOR MAHESH: Below is new logic which is failing when we can't deduce shapes for input data(tensor) - # subgraph_shape_dict = {f_arg.name: self._in_shape[node_input] for f_arg, node_input in - # zip(f1.signature.input_arg, node.input)} - #FOR MAHESH: The following hack works for placeholders and variables if in_shape has all shapes info - i = 0 - newshape = {} - for key,value in self._in_shape.items(): - newshape[sub[0].node[i].name] = value - i+=1 - self._subgraphs.update({f1.signature.name: 'started adding'}) - self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0], shape=newshape)}) - # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub)}) + elif node.op in ["PartitionedCall", "StatefulPartitionedCall"]: + node_fname = node.attr.get('f').func.name + f1 = next((func for func in graph.library.function if func.signature.name == node_fname), None) + if f1 and f1.signature.name not in self._subgraphs: + from tensorflow.python.framework import function_def_to_graph + f1_input_shapes = f1.attr["_input_shapes"].list.shape + subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes) + subgraph_shape_dict = {f_arg.name: _infer_shape(self._nodes[node_input][0]) for f_arg, node_input 
in zip(f1.signature.input_arg, node.input)} + tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) + self._subgraphs.update({f1.signature.name: tf_graph}) + self._backtrack_construct(node.name) + + # if graph.library.function: + # f1 = graph.library.function[0] + # if f1.signature.name not in self._subgraphs: + # from tensorflow.python.framework import function_def_to_graph + # # sub, nested_to_flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1.attr[ + # # "_input_shapes"].list.shape) + # sub = function_def_to_graph.function_def_to_graph_def(f1) + # #FOR MAHESH: Below is new logic which is failing when we can't deduce shapes for input data(tensor) + # # subgraph_shape_dict = {f_arg.name: self._in_shape[node_input] for f_arg, node_input in + # # zip(f1.signature.input_arg, node.input)} + # #FOR MAHESH: The following hack works for placeholders and variables if in_shape has all shapes info + # i = 0 + # newshape = {} + # for key,value in self._in_shape.items(): + # newshape[sub[0].node[i].name] = value + # i+=1 + # self._subgraphs.update({f1.signature.name: 'started adding'}) + # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0], shape=newshape)}) + # # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub)}) # if graph.library.function and not self.libFuncs: # for func in graph.library.function: From 444f4f8aa2e9b43c48d77c111d59fdb1b087b63a Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 12:14:29 +0530 Subject: [PATCH 11/55] Corrected Indentation --- python/tvm/relay/frontend/tensorflow.py | 47 ++++--------------------- 1 file changed, 7 insertions(+), 40 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index a5011e86211b..6661a66b9718 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2860,46 +2860,13 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): f1 = next((func for func in graph.library.function if func.signature.name == node_fname), None) if f1 and f1.signature.name not in self._subgraphs: from tensorflow.python.framework import function_def_to_graph - f1_input_shapes = f1.attr["_input_shapes"].list.shape - subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes) - subgraph_shape_dict = {f_arg.name: _infer_shape(self._nodes[node_input][0]) for f_arg, node_input in zip(f1.signature.input_arg, node.input)} - tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) - self._subgraphs.update({f1.signature.name: tf_graph}) - self._backtrack_construct(node.name) - - # if graph.library.function: - # f1 = graph.library.function[0] - # if f1.signature.name not in self._subgraphs: - # from tensorflow.python.framework import function_def_to_graph - # # sub, nested_to_flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1.attr[ - # # "_input_shapes"].list.shape) - # sub = function_def_to_graph.function_def_to_graph_def(f1) - # #FOR MAHESH: Below is new logic which is failing when we can't deduce shapes for input data(tensor) - # # subgraph_shape_dict = {f_arg.name: self._in_shape[node_input] for f_arg, node_input in - # # zip(f1.signature.input_arg, node.input)} - # #FOR MAHESH: The following hack works for placeholders and variables if in_shape has all shapes info - # i = 0 - # newshape = {} - # for key,value in self._in_shape.items(): - # newshape[sub[0].node[i].name] = value - # i+=1 - # 
self._subgraphs.update({f1.signature.name: 'started adding'}) - # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0], shape=newshape)}) - # # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub)}) - - # if graph.library.function and not self.libFuncs: - # for func in graph.library.function: - # self.libFuncs.append(func.signature.name) - # - # if graph.library.function and not self.libFuncsDict: - # for func in graph.library.function: - # if func.signature.name not in self._subgraphs: - # from tensorflow.python.framework import function_def_to_graph - # sub = function_def_to_graph.function_def_to_graph_def(func) - # self.libFuncsDict[func.signature.name] = sub[0] - # for func in self.libFuncsDict: - # self._subgraphs.update({func: 'started adding'}) - # self._subgraphs.update({func: self.from_tensorflow(self.libFuncsDict[func])}) + f1_input_shapes = f1.attr["_input_shapes"].list.shape + subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes) + subgraph_shape_dict = {f_arg.name: _infer_shape(self._nodes[node_input][0]) for f_arg, node_input in zip(f1.signature.input_arg, node.input)} + tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) + self._subgraphs.update({f1.signature.name: tf_graph}) + self._backtrack_construct(node.name) + # First, parse all control flow nodes. # Convert tf.cond to Branch and tf.while_loop to Loop. sorted_cf_nodes = [] From 6ce53c440d2568607a155f8be532d799d5dfac4f Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 12:15:03 +0530 Subject: [PATCH 12/55] filewritter is only for debug purpose --- tests/python/frontend/tensorflow/test_forward.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 22f216124a61..fac90686367a 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -181,7 +181,6 @@ def name_without_num(name): sess.run(variables.global_variables_initializer()) final_graph_def = tf_testing.AddShapesToGraphDef(sess, out_node) tf_output = run_tf_graph(sess, in_data, in_name, out_name) - file_writer = tf.summary.FileWriter("/home/deepak/tfgraphsoutput/", sess.graph) for device in ["llvm", "cuda"]: ctx = tvm.context(device, 0) if not ctx.exist: From e0eb16667f266f18d033bdc7dd05033b7e282927 Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 16:13:47 +0530 Subject: [PATCH 13/55] support variables in function args --- python/tvm/relay/frontend/tensorflow.py | 36 +++++++++++++++++-------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 6661a66b9718..edf7aa60de9f 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2745,6 +2745,7 @@ def __init__(self): self._hash2tfnode = {} self._while_loop_name_set = set() self._subgraphs = {} + self._subgraphFunctions = [] def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. 
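Note on the shape handling introduced in this patch: the hunks that follow build the
subgraph's input-shape dictionary by running Relay type inference on the already
converted input tensors via _infer_shape. A tiny illustration of that helper (a
sketch, assuming a TVM checkout where the frontend imports it from
tvm.relay.frontend.common):

    from tvm import relay
    from tvm.relay.frontend.common import infer_shape

    t = relay.var("t", shape=(3, 4, 5), dtype="float32")
    # Type inference runs on the expression and the concrete output
    # shape comes back as a tuple of ints.
    print(infer_shape(relay.transpose(t)))   # (5, 4, 3)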
@@ -2796,6 +2797,10 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): self._layout = layout self._graph = graph + # ToDo: need _subgraphFunctions as self._graph gets updated on recurrsive calls + self._subgraphFunctions += graph.library.function + + if missing_operators: freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list] if freezed_ops: @@ -2855,17 +2860,6 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): if node.op == "Exit": self._while_loop_name_set.add(node_name_prefix) control_flow_nodes.append(node) - elif node.op in ["PartitionedCall", "StatefulPartitionedCall"]: - node_fname = node.attr.get('f').func.name - f1 = next((func for func in graph.library.function if func.signature.name == node_fname), None) - if f1 and f1.signature.name not in self._subgraphs: - from tensorflow.python.framework import function_def_to_graph - f1_input_shapes = f1.attr["_input_shapes"].list.shape - subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes) - subgraph_shape_dict = {f_arg.name: _infer_shape(self._nodes[node_input][0]) for f_arg, node_input in zip(f1.signature.input_arg, node.input)} - tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) - self._subgraphs.update({f1.signature.name: tf_graph}) - self._backtrack_construct(node.name) # First, parse all control flow nodes. # Convert tf.cond to Branch and tf.while_loop to Loop. @@ -3285,6 +3279,26 @@ def _backtrack_construct(self, node_name): inputs.append(in_op) if node.op in ["PartitionedCall", "StatefulPartitionedCall"]: + + + node_fname = node.attr.get('f').func.name + f1 = next((func for func in self._subgraphFunctions if func.signature.name == node_fname), None) + if f1 and f1.signature.name not in self._subgraphs: + from tensorflow.python.framework import function_def_to_graph + f1_input_shapes = f1.attr["_input_shapes"].list.shape + subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes) + + subgraph_shape_dict = {} + for f_arg, node_input in zip(f1.signature.input_arg, node.input): + input_tensor = self._nodes.get(node_input, None) + if input_tensor: + subgraph_shape_dict[f_arg.name] = _infer_shape(input_tensor[0]) + # subgraph_shape_dict = {f_arg.name: _infer_shape(self._nodes.get(node_input,[None])[0]) for f_arg, node_input in zip(f1.signature.input_arg, node.input)} + tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) + self._subgraphs.update({f1.signature.name: tf_graph}) + # self._backtrack_construct(node.name) + + f1 = self._subgraphs[attr["f"].name][0]["main"] # add_one = tvm.relay.GlobalVar("add_one") # self._mod[add_one] = self._subgraphs[attr["f"].name][0]["main"] From cb1ec4dafd41448e3765ecbd89595b446c088163 Mon Sep 17 00:00:00 2001 From: deepak Date: Thu, 14 May 2020 17:19:06 +0530 Subject: [PATCH 14/55] First working iteration of positive spop test cases --- python/tvm/relay/frontend/tensorflow.py | 64 ++++++------------- .../frontend/tensorflow/test_forward.py | 2 +- 2 files changed, 20 insertions(+), 46 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 3758832ba826..3adf175debf4 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2745,6 +2745,7 @@ def __init__(self): self._hash2tfnode = {} self._while_loop_name_set = set() self._subgraphs = {} + self._subgraphFunctions = [] def from_tensorflow(self, graph, 
layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. @@ -2796,6 +2797,9 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): self._layout = layout self._graph = graph + # ToDo: need _subgraphFunctions as self._graph gets updated on recursive calls + self._subgraphFunctions += graph.library.function + if missing_operators: freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list] if freezed_ops: @@ -2856,39 +2860,6 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): self._while_loop_name_set.add(node_name_prefix) control_flow_nodes.append(node) - if graph.library.function: - f1 = graph.library.function[0] - if f1.signature.name not in self._subgraphs: - from tensorflow.python.framework import function_def_to_graph - # sub, nested_to_flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1.attr[ - # "_input_shapes"].list.shape) - sub = function_def_to_graph.function_def_to_graph_def(f1) - #FOR MAHESH: Below is new logic which is failing when we can't deduce shapes for input data(tensor) - # subgraph_shape_dict = {f_arg.name: self._in_shape[node_input] for f_arg, node_input in - # zip(f1.signature.input_arg, node.input)} - #FOR MAHESH: The following hack works for placeholders and variables if in_shape has all shapes info - i = 0 - newshape = {} - for key,value in self._in_shape.items(): - newshape[sub[0].node[i].name] = value - i+=1 - self._subgraphs.update({f1.signature.name: 'started adding'}) - self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub[0], shape=newshape)}) - # self._subgraphs.update({f1.signature.name: self.from_tensorflow(sub)}) - - # if graph.library.function and not self.libFuncs: - # for func in graph.library.function: - # self.libFuncs.append(func.signature.name) - # - # if graph.library.function and not self.libFuncsDict: - # for func in graph.library.function: - # if func.signature.name not in self._subgraphs: - # from tensorflow.python.framework import function_def_to_graph - # sub = function_def_to_graph.function_def_to_graph_def(func) - # self.libFuncsDict[func.signature.name] = sub[0] - # for func in self.libFuncsDict: - # self._subgraphs.update({func: 'started adding'}) - # self._subgraphs.update({func: self.from_tensorflow(self.libFuncsDict[func])}) # First, parse all control flow nodes. # Convert tf.cond to Branch and tf.while_loop to Loop. 
sorted_cf_nodes = [] @@ -3307,9 +3278,22 @@ def _backtrack_construct(self, node_name): inputs.append(in_op) if node.op in ["PartitionedCall", "StatefulPartitionedCall"]: + node_fname = node.attr.get('f').func.name + f1 = next((func for func in self._subgraphFunctions if func.signature.name == node_fname), None) + if f1 and f1.signature.name not in self._subgraphs: + from tensorflow.python.framework import function_def_to_graph + f1_input_shapes = f1.attr["_input_shapes"].list.shape + subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes) + + subgraph_shape_dict = {} + for f_arg, node_input in zip(f1.signature.input_arg, node.input): + input_tensor = self._nodes.get(node_input, None) + if input_tensor: + subgraph_shape_dict[f_arg.name] = _infer_shape(input_tensor[0]) + tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) + self._subgraphs.update({f1.signature.name: tf_graph}) + f1 = self._subgraphs[attr["f"].name][0]["main"] - # add_one = tvm.relay.GlobalVar("add_one") - # self._mod[add_one] = self._subgraphs[attr["f"].name][0]["main"] wl = tvm.relay.var('partitioned_call') sb = tvm.relay.scope_builder.ScopeBuilder() sb.let(wl, f1) @@ -3317,16 +3301,6 @@ def _backtrack_construct(self, node_name): sb.ret(wl(*inputs)) op = sb.get() print(op) - # elif node.op in self.libFuncs: - # f1 = self._subgraphs[attr["f"].name][0]["main"] - # # add_one = tvm.relay.GlobalVar("add_one") - # # self._mod[add_one] = self._subgraphs[attr["f"].name][0]["main"] - # wl = tvm.relay.var('partitioned_call') - # sb = tvm.relay.scope_builder.ScopeBuilder() - # sb.let(wl, f1) - # - # sb.ret(wl(*inputs)) - # op = sb.get() else: op = self._convert_operator(node.op, inputs, attr, self._graph) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 22f216124a61..6d42473ed9fa 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -181,7 +181,7 @@ def name_without_num(name): sess.run(variables.global_variables_initializer()) final_graph_def = tf_testing.AddShapesToGraphDef(sess, out_node) tf_output = run_tf_graph(sess, in_data, in_name, out_name) - file_writer = tf.summary.FileWriter("/home/deepak/tfgraphsoutput/", sess.graph) + # file_writer = tf.summary.FileWriter("/home/deepak/tfgraphsoutput/", sess.graph) for device in ["llvm", "cuda"]: ctx = tvm.context(device, 0) if not ctx.exist: From a49369e9f66c7b6ce744e8a12837c60be727fab5 Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 17:39:32 +0530 Subject: [PATCH 15/55] Removed commented code, simplified code --- python/tvm/relay/frontend/tensorflow.py | 66 ++++++++++--------------- 1 file changed, 27 insertions(+), 39 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index edf7aa60de9f..9bc90c3e5ab0 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2744,8 +2744,7 @@ def __init__(self): self._loop_var_order = {} self._hash2tfnode = {} self._while_loop_name_set = set() - self._subgraphs = {} - self._subgraphFunctions = [] + self._graphLibraryFunctions = [] def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. 
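Note on the invocation pattern in _backtrack_construct above: the converted subgraph's
"main" function is let-bound to a fresh variable and then called with the op's inputs,
using Relay's ScopeBuilder. A self-contained sketch with a toy function standing in
for the converted subgraph (all names here are illustrative):

    import tvm
    from tvm import relay

    # Toy Relay function in place of self._subgraphs[...]["main"].
    x = relay.var("x", shape=(2, 2), dtype="float32")
    sub_fn = relay.Function([x], relay.add(x, x))

    arg = relay.var("arg", shape=(2, 2), dtype="float32")
    sb = tvm.relay.scope_builder.ScopeBuilder()
    fn_var = tvm.relay.var("partitioned_call")
    sb.let(fn_var, sub_fn)     # let %partitioned_call = fn (%x) { add(%x, %x) }
    sb.ret(fn_var(arg))        # call the bound function with the op's inputs
    op = sb.get()

The let-binding gives the subgraph function a local name, so the call site stays a
plain Relay Call and the function body is not duplicated at every call.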
@@ -2797,8 +2796,10 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
 
         self._layout = layout
         self._graph = graph
-        # ToDo: need _subgraphFunctions as self._graph gets updated on recursive calls
-        self._subgraphFunctions += graph.library.function
+        # ToDo: Need a better way to reference graph.library.function
+        # "_graphLibraryFunctions+=" is used to accumulate all library functions
+        # as parent graph\self._graph is lost after recursive calls to from_tensorflow
+        self._graphLibraryFunctions += graph.library.function
 
         if missing_operators:
@@ -3279,46 +3280,33 @@ def _backtrack_construct(self, node_name):
                 inputs.append(in_op)
 
             if node.op in ["PartitionedCall", "StatefulPartitionedCall"]:
-
-
-                node_fname = node.attr.get('f').func.name
-                f1 = next((func for func in self._subgraphFunctions if func.signature.name == node_fname), None)
-                if f1 and f1.signature.name not in self._subgraphs:
+                tf_graph = None
+                node_func_name = node.attr.get('f').func.name
+                func = next((f for f in self._graphLibraryFunctions if f.signature.name == node_func_name), None)
+                if func:
                     from tensorflow.python.framework import function_def_to_graph
-                    f1_input_shapes = f1.attr["_input_shapes"].list.shape
-                    subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes)
+
+                    # Convert function definition to graph
+                    func_input_shapes = func.attr["_input_shapes"].list.shape
+                    subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes)
+
+                    # Computing subgraph's input shape dictionary
                     subgraph_shape_dict = {}
-                    for f_arg, node_input in zip(f1.signature.input_arg, node.input):
+                    for f_arg, node_input in zip(func.signature.input_arg, node.input):
                         input_tensor = self._nodes.get(node_input, None)
                         if input_tensor:
-                            subgraph_shape_dict[f_arg.name] = _infer_shape(input_tensor[0])
-                    # subgraph_shape_dict = {f_arg.name: _infer_shape(self._nodes.get(node_input,[None])[0]) for f_arg, node_input in zip(f1.signature.input_arg, node.input)}
-                    tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict)
-                    self._subgraphs.update({f1.signature.name: tf_graph})
-                    # self._backtrack_construct(node.name)
-
-
-                f1 = self._subgraphs[attr["f"].name][0]["main"]
-                wl = tvm.relay.var('partitioned_call')
-                sb = tvm.relay.scope_builder.ScopeBuilder()
-                sb.let(wl, f1)
-
-                sb.ret(wl(*inputs))
-                op = sb.get()
-                print(op)
+                            subgraph_shape_dict[f_arg.name] = _infer_shape(input_tensor[0]) # ToDo: Is [0] always a safe access ?
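                            # Editor's note on the ToDo above, not part of the patch:
                            # self._nodes maps a TF tensor name to a list of Relay
                            # expressions, one per op output, so [0] takes the first
                            # output. A more defensive lookup (hypothetical sketch)
                            # could honour an explicit ":k" output index instead:
                            #
                            #   out_idx = int(node_input.split(":")[1]) if ":" in node_input else 0
                            #   if out_idx < len(input_tensor):
                            #       subgraph_shape_dict[f_arg.name] = _infer_shape(input_tensor[out_idx])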
+ + # Construct relay nodes from the subgraph + ir_mod, params = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) + + if ir_mod: + wl = tvm.relay.var('partitioned_call') + sb = tvm.relay.scope_builder.ScopeBuilder() + sb.let(wl, ir_mod["main"]) + sb.ret(wl(*inputs)) + op = sb.get() + print(op) else: op = self._convert_operator(node.op, inputs, attr, self._graph) From 898b79b7af7d36af79aa33fd06ca261d86bdd203 Mon Sep 17 00:00:00 2001 From: deepak Date: Thu, 14 May 2020 17:41:11 +0530 Subject: [PATCH 16/55] Code Reorganization- First working iteration of positive spop test cases --- .../frontend/tensorflow/test_forward.py | 159 +++++++----------- 1 file changed, 57 insertions(+), 102 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 6d42473ed9fa..7022c13ecf94 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -181,7 +181,7 @@ def name_without_num(name): sess.run(variables.global_variables_initializer()) final_graph_def = tf_testing.AddShapesToGraphDef(sess, out_node) tf_output = run_tf_graph(sess, in_data, in_name, out_name) - # file_writer = tf.summary.FileWriter("/home/deepak/tfgraphsoutput/", sess.graph) + for device in ["llvm", "cuda"]: ctx = tvm.context(device, 0) if not ctx.exist: @@ -3167,26 +3167,28 @@ def test_forward_isinf(): def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") -def test_spop_function_invocation(): +def test_spop_placeholder_one(): + print("Inside placeholder function") tf.reset_default_graph() - with tf.Graph().as_default(): - - def fun1(a): - return tf.multiply(a,a) - - def fun2(b): - return tf.multiply(b,10) - - @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") - def fun3(x,y): - x = fun2(x) - y = fun1(y) - z = tf.add(x,y) - return z + g = tf.Graph() + with g.as_default(): - op = gen_functional_ops.StatefulPartitionedCall(args=[tf.constant(10.5),tf.constant(20.4)], - Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") - compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) + @function.Defun(*[tf.int32]*2) + def Forward(x,y): + print(x.name) + print(y.name) + b = tf.add(x, y) + return b + pl1 = tf.placeholder(tf.int32,name="pl1") + pl2 = tf.placeholder(tf.int32,name="pl2") + pl3 = tf.placeholder(tf.int32, name="pl3") + data = np.array([[-1, 1], [2, -2]], dtype=np.int32) + data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32) + data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32) + z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward) + z2 = z1 + pl3 + compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], + ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) def test_spop_placeholder_two(): @@ -3212,55 +3214,48 @@ def test_spop_placeholder_three(): # @tf.function(input_signature=[tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32), # tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32)]) - @tf.function() + @tf.function def add(x, y): return tf.add(x, y, "add_t1_t2") + t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) def test_spop_placeholder_four(): tf.disable_eager_execution() - t1 = tf.placeholder(tf.int32,name="t1") t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) - t2 = tf.placeholder(tf.int32, name="t2") t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 
4]], dtype=np.int32) + tf.reset_default_graph() + t1 = tf.placeholder(tf.int32, name="t1") + t2 = tf.placeholder(tf.int32, name="t2") - @tf.function(input_signature=[tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32), - tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32)]) + @tf.function def add(x, y): return tf.add(x, y, "add_t1_t2") + t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def test_spop_placeholder_one(): - print("Inside placeholder function") +def test_spop_function_invocation(): tf.reset_default_graph() - g = tf.Graph() - with g.as_default(): - # @function.Defun(tf.TensorSpec(shape=(3,3,3), dtype=tf.int32), tf.TensorSpec(shape=(3,3,3), dtype=tf.int32)) - @function.Defun(*[tf.int32]*2) - def Forward(x,y): - #Do not create placeholders in Defun methods..placeholders should be created outside of Defun()..and can be passed inside it - print(x.name) - print(y.name) - b = tf.add(x, y) - return b - pl1 = tf.placeholder(tf.int32,name="pl1") - pl2 = tf.placeholder(tf.int32,name="pl2") - pl3 = tf.placeholder(tf.int32, name="pl3") - data = np.array([[-1, 1], [2, -2]], dtype=np.int32) - data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32) - data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32) - z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1,pl2], Tout=[tf.int32],f=Forward) - z2 = z1 + pl3 - compare_tf_with_tvm([data, data2], ['pl1:0', 'pl2:0'], - 'StatefulPartitionedCall:0', - mode='vm', - init_global_variables=True) - # compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], - # ['StatefulPartitionedCall:0',z2.name], - # mode='vm', - # init_global_variables=True) + with tf.Graph().as_default(): + + def fun1(a): + return tf.multiply(a,a) + + def fun2(b): + return tf.multiply(b,10) + + @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") + def fun3(x,y): + x = fun2(x) + y = fun1(y) + z = tf.add(x,y) + return z + + op = gen_functional_ops.StatefulPartitionedCall(args=[tf.constant(10.5),tf.constant(20.4)], + Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") + compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) def test_spop_arithmetic(): tf.reset_default_graph() @@ -3277,14 +3272,13 @@ def arithmetic(m,x,c): compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_control_flow_one(): +def test_spop_control_flow(): tf.reset_default_graph() with tf.Graph().as_default(): - # WSTART + @function.Defun(*[dtypes.float32] * 2) def Body1(x, y): with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"): - # z = Body2 z = math_ops.multiply(x, y) i = 0 while i<10 : @@ -3297,61 +3291,23 @@ def Body1(x, y): op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), constant_op.constant(100.)], Tout=[dtypes.float32], f=Body1) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_control_flow_two(): - tf.reset_default_graph() - with tf.Graph().as_default(): - @function.Defun() - def vanilla_loop(): - i = tf.constant(0) - - def c(i): return tf.less(i, 10) - - def b(i): return tf.add(i, 1) - r = tf.while_loop(c, b, [i]) - return r - z = gen_functional_ops.StatefulPartitionedCall(args=[], Tout=[tf.int32], f=vanilla_loop) - compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) - -def test_spop_control_flow(): - test_spop_control_flow_one() - # test_spop_control_flow_two() - def 
test_spop_variables(): tf.reset_default_graph() g = tf.Graph() with g.as_default(): - @function.Defun(tf.int32,tf.int32) - def Forward(x,y): - #create variables outside Defun() method, you can pass variables inside Defun method though - return tf.multiply(x,y) const1 = tf.constant(10) const2 = tf.constant(20) var1 = tf.Variable(const1, dtype=tf.int32) var2 = tf.Variable(const2, dtype=tf.int32) + @function.Defun(tf.int32,tf.int32) + def Forward(x,y): + return tf.multiply(x,y) + z = gen_functional_ops.StatefulPartitionedCall(args=[var1,var2],Tout=[tf.int32], f=Forward) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") -def test_spop_variables_one(): - tf.reset_default_graph() - with tf.Graph().as_default(): - data = np.random.uniform(size=(32, 100)).astype('float32') - - @function.Defun() - def variableFn(): - input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype) - input_tensor = array_ops.reshape(input_op, data.shape) - - size = input_tensor.shape.dims[1] - with variable_scope.variable_scope("linear", reuse=None): - w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype) - ret = math_ops.matmul(input_tensor, w) - return ret - - z = gen_functional_ops.StatefulPartitionedCall(args=[], Tout=[dtypes.float32], f=variableFn) - compare_tf_with_tvm(data, 'Placeholder:0', 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") - def test_spop_constants(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3370,23 +3326,22 @@ def test_spop_placeholder(): test_spop_placeholder_one() test_spop_placeholder_two() test_spop_placeholder_three() - # test_spop_placeholder_four() + test_spop_placeholder_four() -def test_spop(): +def test_spop_positive(): + test_spop_placeholder() test_spop_function_invocation() test_spop_arithmetic() test_spop_control_flow() test_spop_variables() test_spop_constants() - test_spop_placeholder() - ####################################################################### # Main # ---- if __name__ == '__main__': # StatefulPartitionedOp - # test_spop() + test_spop_positive() # Transforms test_forward_slice() test_forward_transpose() From c6e0eafc0728d02453320d638aa95ca304af808a Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 17:41:16 +0530 Subject: [PATCH 17/55] corrected variable name after refactor --- python/tvm/relay/frontend/tensorflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 9bc90c3e5ab0..6027a0d7af17 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -3280,7 +3280,7 @@ def _backtrack_construct(self, node_name): inputs.append(in_op) if node.op in ["PartitionedCall", "StatefulPartitionedCall"]: - tf_graph = None + ir_mod = None node_func_name = node.attr.get('f').func.name func = next((f for f in self._graphLibraryFunctions if f.signature.name == node_func_name), None) if func: From 6ac4d7fccd7680431adf82aa1ef62b51afb14df5 Mon Sep 17 00:00:00 2001 From: deepak Date: Thu, 14 May 2020 18:11:29 +0530 Subject: [PATCH 18/55] Code Reorganization- First working iteration of positive spop test cases --- .../frontend/tensorflow/test_forward.py | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7022c13ecf94..ec954fe6c109 100644 --- 
a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3167,7 +3167,7 @@ def test_forward_isinf(): def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") -def test_spop_placeholder_one(): +def _test_spop_placeholder_one(): print("Inside placeholder function") tf.reset_default_graph() g = tf.Graph() @@ -3190,7 +3190,7 @@ def Forward(x,y): compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) -def test_spop_placeholder_two(): +def _test_spop_placeholder_two(): with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) @@ -3205,7 +3205,7 @@ def pl_with_default(pl): z = gen_functional_ops.StatefulPartitionedCall(args=[tpl], Tout=[tf.int32], f=pl_with_default) compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_placeholder_three(): +def _test_spop_placeholder_three(): tf.disable_eager_execution() t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) @@ -3221,7 +3221,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def test_spop_placeholder_four(): +def _test_spop_placeholder_four(): tf.disable_eager_execution() t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) @@ -3236,7 +3236,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def test_spop_function_invocation(): +def _test_spop_function_invocation(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3257,7 +3257,7 @@ def fun3(x,y): Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) -def test_spop_arithmetic(): +def _test_spop_arithmetic(): tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32]*3) @@ -3272,7 +3272,7 @@ def arithmetic(m,x,c): compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_control_flow(): +def _test_spop_control_flow(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3291,7 +3291,7 @@ def Body1(x, y): op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), constant_op.constant(100.)], Tout=[dtypes.float32], f=Body1) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_variables(): +def _test_spop_variables(): tf.reset_default_graph() g = tf.Graph() with g.as_default(): @@ -3308,7 +3308,7 @@ def Forward(x,y): z = gen_functional_ops.StatefulPartitionedCall(args=[var1,var2],Tout=[tf.int32], f=Forward) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") -def test_spop_constants(): +def _test_spop_constants(): tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 2) @@ -3322,26 +3322,26 @@ def constantsFn(x, y): compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_placeholder(): - test_spop_placeholder_one() - test_spop_placeholder_two() - test_spop_placeholder_three() - test_spop_placeholder_four() +def 
_test_spop_placeholder(): + _test_spop_placeholder_one() + _test_spop_placeholder_two() + _test_spop_placeholder_three() + _test_spop_placeholder_four() -def test_spop_positive(): - test_spop_placeholder() - test_spop_function_invocation() - test_spop_arithmetic() - test_spop_control_flow() - test_spop_variables() - test_spop_constants() +def test_forward_spop_positive(): + _test_spop_placeholder() + _test_spop_function_invocation() + _test_spop_arithmetic() + _test_spop_control_flow() + _test_spop_variables() + _test_spop_constants() ####################################################################### # Main # ---- if __name__ == '__main__': # StatefulPartitionedOp - test_spop_positive() + test_forward_spop_positive() # Transforms test_forward_slice() test_forward_transpose() From b046af1a2769bd3d8b4dc0fee69cece4dd90eae3 Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 19:39:19 +0530 Subject: [PATCH 19/55] move code inside mapped operator function --- python/tvm/relay/frontend/tensorflow.py | 83 +++++++++---------------- 1 file changed, 29 insertions(+), 54 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 6027a0d7af17..f9d970d3ce51 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -1931,26 +1931,34 @@ def _impl(inputs, attr, params, mod): return _impl def _partitioned_call(): - def _impl(inputs, attr, params): - if not isinstance(inputs, tuple): - inputs = list(inputs) - assert len(inputs) > 0, "add_n take >=1 inputs, but 0 given." - _res = inputs[0] - for each in inputs[1:]: - _res = _op.add(_res, each) - return _res + def _impl(inputs, attr, params, mod, graph): + node_func_name = attr.get('f').name + func = next((f for f in graph.library.function if f.signature.name == node_func_name), None) + if func: + from tensorflow.python.framework import function_def_to_graph + + # Convert function definition to graph + func_input_shapes = func.attr["_input_shapes"].list.shape + subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes) + + # Computing subgraph's input shape dictionary + subgraph_shape_dict = {} + for f_arg, input in zip(func.signature.input_arg, inputs): + subgraph_shape_dict[f_arg.name] = _infer_shape(input) + + # Construct relay nodes from the subgraph + g = GraphProto() + mod, params = g.from_tensorflow(subgraph, shape=subgraph_shape_dict) + wl = tvm.relay.var('partitioned_call') + sb = tvm.relay.scope_builder.ScopeBuilder() + sb.let(wl, mod["main"]) + sb.ret(wl(*inputs)) + op = sb.get() + return op return _impl def _stateful_partitioned_call(): - def _impl(inputs, attr, params): - if not isinstance(inputs, tuple): - inputs = list(inputs) - assert len(inputs) > 0, "add_n take >=1 inputs, but 0 given." - _res = inputs[0] - for each in inputs[1:]: - _res = _op.add(_res, each) - return _res - return _impl + return _partitioned_call() # compatible operators that do NOT require any conversion. 
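# Editorial aside, not part of the patch: the ScopeBuilder pattern that
# _partitioned_call uses above, shown standalone. A hedged sketch; the names
# square_fn and arg are illustrative, not taken from the TVM sources.
import tvm

x = tvm.relay.var("x", shape=(2, 2), dtype="float32")
square_fn = tvm.relay.Function([x], tvm.relay.multiply(x, x))  # stands in for mod["main"]
arg = tvm.relay.var("arg", shape=(2, 2), dtype="float32")

wl = tvm.relay.var("partitioned_call")
sb = tvm.relay.scope_builder.ScopeBuilder()
sb.let(wl, square_fn)   # let %partitioned_call = fn (%x) { multiply(%x, %x) }
sb.ret(wl(arg))         # return %partitioned_call(%arg)
print(sb.get())         # the let-bound call expression handed back as the converted op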
@@ -2057,7 +2065,6 @@ def _impl(inputs, attr, params): 'OneHot' : _one_hot(), 'Pack' : _pack(), 'PartitionedCall' : _partitioned_call(), - 'StatefulPartitionedCall' : _stateful_partitioned_call(), 'Pad' : _pad('Pad'), 'PadV2' : _pad('PadV2'), 'Pow' : _elemwise('power'), @@ -2094,6 +2101,7 @@ def _impl(inputs, attr, params): 'Square' : _square(), 'SquaredDifference' : _squared_difference(), 'Squeeze' : _squeeze(), + 'StatefulPartitionedCall' : _stateful_partitioned_call(), 'StopGradient' : _identity(), 'StridedSlice' : _stridedSlice(), 'Sub' : _elemwise('subtract'), @@ -2744,7 +2752,6 @@ def __init__(self): self._loop_var_order = {} self._hash2tfnode = {} self._while_loop_name_set = set() - self._graphLibraryFunctions = [] def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. @@ -2796,11 +2803,6 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): self._layout = layout self._graph = graph - # ToDo: Need a better way to reference graph.library.function - # "_graphLibraryFunctions+=" is used to accumulate all library functions - # as parent graph\self._graph is lost after recursive calls to from_tensorflow - self._graphLibraryFunctions += graph.library.function - if missing_operators: freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list] @@ -3221,7 +3223,9 @@ def _convert_operator(self, op_name, inputs, attrs, if op_name in identity_list: sym = get_relay_op(op_name)(*inputs, **attrs) elif op_name in convert_map: - if _need_prelude_for_shape_inference(op_name): + if op_name in ["PartitionedCall", "StatefulPartitionedCall"]: + sym = convert_map[op_name](inputs, attrs, self._params, self._mod, self._graph) + elif _need_prelude_for_shape_inference(op_name): sym = convert_map[op_name](inputs, attrs, self._params, self._prelude) else: sym = convert_map[op_name](inputs, attrs, self._params, self._mod) @@ -3279,35 +3283,6 @@ def _backtrack_construct(self, node_name): in_op = in_op[0] inputs.append(in_op) - if node.op in ["PartitionedCall", "StatefulPartitionedCall"]: - ir_mod = None - node_func_name = node.attr.get('f').func.name - func = next((f for f in self._graphLibraryFunctions if f.signature.name == node_func_name), None) - if func: - from tensorflow.python.framework import function_def_to_graph - - # Convert function definition to graph - func_input_shapes = func.attr["_input_shapes"].list.shape - subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes) - - # Computing subgraph's input shape dictionary - subgraph_shape_dict = {} - for f_arg, node_input in zip(func.signature.input_arg, node.input): - input_tensor = self._nodes.get(node_input, None) - if input_tensor: - subgraph_shape_dict[f_arg.name] = _infer_shape(input_tensor[0]) # ToDo: Is [0] always a safe access ? 
- - # Construct relay nodes from the subgraph - ir_mod, params = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) - - if ir_mod: - wl = tvm.relay.var('partitioned_call') - sb = tvm.relay.scope_builder.ScopeBuilder() - sb.let(wl, ir_mod["main"]) - sb.ret(wl(*inputs)) - op = sb.get() - print(op) - else: op = self._convert_operator(node.op, inputs, attr, self._graph) if isinstance(op, np.ndarray): From 4ae8deaa215fa028276939e9da582c984956ee1b Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 19:47:32 +0530 Subject: [PATCH 20/55] Removed extra line --- python/tvm/relay/frontend/tensorflow.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index f9d970d3ce51..56424a3b7a47 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2803,7 +2803,6 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): self._layout = layout self._graph = graph - if missing_operators: freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list] if freezed_ops: From b3de79d0bb9cfac3123f8191ffa70784536f6361 Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 16:13:47 +0530 Subject: [PATCH 21/55] support variables in function args --- python/tvm/relay/frontend/tensorflow.py | 28 ------------------------- 1 file changed, 28 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 3adf175debf4..3f10193d9241 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2797,9 +2797,6 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): self._layout = layout self._graph = graph - # ToDo: need _subgraphFunctions as self._graph gets updated on recursive calls - self._subgraphFunctions += graph.library.function - if missing_operators: freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list] if freezed_ops: @@ -3277,31 +3274,6 @@ def _backtrack_construct(self, node_name): in_op = in_op[0] inputs.append(in_op) - if node.op in ["PartitionedCall", "StatefulPartitionedCall"]: - node_fname = node.attr.get('f').func.name - f1 = next((func for func in self._subgraphFunctions if func.signature.name == node_fname), None) - if f1 and f1.signature.name not in self._subgraphs: - from tensorflow.python.framework import function_def_to_graph - f1_input_shapes = f1.attr["_input_shapes"].list.shape - subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(f1, f1_input_shapes) - - subgraph_shape_dict = {} - for f_arg, node_input in zip(f1.signature.input_arg, node.input): - input_tensor = self._nodes.get(node_input, None) - if input_tensor: - subgraph_shape_dict[f_arg.name] = _infer_shape(input_tensor[0]) - tf_graph = self.from_tensorflow(subgraph, shape=subgraph_shape_dict) - self._subgraphs.update({f1.signature.name: tf_graph}) - - f1 = self._subgraphs[attr["f"].name][0]["main"] - wl = tvm.relay.var('partitioned_call') - sb = tvm.relay.scope_builder.ScopeBuilder() - sb.let(wl, f1) - - sb.ret(wl(*inputs)) - op = sb.get() - print(op) - else: op = self._convert_operator(node.op, inputs, attr, self._graph) if isinstance(op, np.ndarray): From f767360efbd55f94c2532f062fe76afd23b58124 Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 17:39:32 +0530 Subject: [PATCH 22/55] Removed commented code, simplified code --- 
python/tvm/relay/frontend/tensorflow.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 3f10193d9241..e9b01632384f 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2744,8 +2744,7 @@ def __init__(self): self._loop_var_order = {} self._hash2tfnode = {} self._while_loop_name_set = set() - self._subgraphs = {} - self._subgraphFunctions = [] + self._graphLibraryFunctions = [] def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. From e3517ddfca4904f4a0897a5fe709c1f4c5c8a9bf Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 19:39:19 +0530 Subject: [PATCH 23/55] move code inside mapped operator function --- python/tvm/relay/frontend/tensorflow.py | 49 +++++++++++++++---------- 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index e9b01632384f..56424a3b7a47 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -1931,26 +1931,34 @@ def _impl(inputs, attr, params, mod): return _impl def _partitioned_call(): - def _impl(inputs, attr, params): - if not isinstance(inputs, tuple): - inputs = list(inputs) - assert len(inputs) > 0, "add_n take >=1 inputs, but 0 given." - _res = inputs[0] - for each in inputs[1:]: - _res = _op.add(_res, each) - return _res + def _impl(inputs, attr, params, mod, graph): + node_func_name = attr.get('f').name + func = next((f for f in graph.library.function if f.signature.name == node_func_name), None) + if func: + from tensorflow.python.framework import function_def_to_graph + + # Convert function definition to graph + func_input_shapes = func.attr["_input_shapes"].list.shape + subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes) + + # Computing subgraph's input shape dictionary + subgraph_shape_dict = {} + for f_arg, input in zip(func.signature.input_arg, inputs): + subgraph_shape_dict[f_arg.name] = _infer_shape(input) + + # Construct relay nodes from the subgraph + g = GraphProto() + mod, params = g.from_tensorflow(subgraph, shape=subgraph_shape_dict) + wl = tvm.relay.var('partitioned_call') + sb = tvm.relay.scope_builder.ScopeBuilder() + sb.let(wl, mod["main"]) + sb.ret(wl(*inputs)) + op = sb.get() + return op return _impl def _stateful_partitioned_call(): - def _impl(inputs, attr, params): - if not isinstance(inputs, tuple): - inputs = list(inputs) - assert len(inputs) > 0, "add_n take >=1 inputs, but 0 given." - _res = inputs[0] - for each in inputs[1:]: - _res = _op.add(_res, each) - return _res - return _impl + return _partitioned_call() # compatible operators that do NOT require any conversion. 
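# Editorial aside, not part of the patch: a (Stateful)PartitionedCall NodeDef
# carries a NameAttrList attribute "f" whose .name field is what the converter
# above resolves against graph.library.function. A hedged inspection helper;
# list_partitioned_calls is an illustrative name, not an existing API.
def list_partitioned_calls(graph_def):
    """Print each call node together with the library function it targets."""
    funcs = {f.signature.name: f for f in graph_def.library.function}
    for node in graph_def.node:
        if node.op in ("PartitionedCall", "StatefulPartitionedCall"):
            fname = node.attr["f"].func.name
            fdef = funcs.get(fname)
            n_in = len(fdef.signature.input_arg) if fdef else "?"
            print("%s -> %s (%s inputs)" % (node.name, fname, n_in))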
@@ -2057,7 +2065,6 @@ def _impl(inputs, attr, params): 'OneHot' : _one_hot(), 'Pack' : _pack(), 'PartitionedCall' : _partitioned_call(), - 'StatefulPartitionedCall' : _stateful_partitioned_call(), 'Pad' : _pad('Pad'), 'PadV2' : _pad('PadV2'), 'Pow' : _elemwise('power'), @@ -2094,6 +2101,7 @@ def _impl(inputs, attr, params): 'Square' : _square(), 'SquaredDifference' : _squared_difference(), 'Squeeze' : _squeeze(), + 'StatefulPartitionedCall' : _stateful_partitioned_call(), 'StopGradient' : _identity(), 'StridedSlice' : _stridedSlice(), 'Sub' : _elemwise('subtract'), @@ -2744,7 +2752,6 @@ def __init__(self): self._loop_var_order = {} self._hash2tfnode = {} self._while_loop_name_set = set() - self._graphLibraryFunctions = [] def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. @@ -3215,7 +3222,9 @@ def _convert_operator(self, op_name, inputs, attrs, if op_name in identity_list: sym = get_relay_op(op_name)(*inputs, **attrs) elif op_name in convert_map: - if _need_prelude_for_shape_inference(op_name): + if op_name in ["PartitionedCall", "StatefulPartitionedCall"]: + sym = convert_map[op_name](inputs, attrs, self._params, self._mod, self._graph) + elif _need_prelude_for_shape_inference(op_name): sym = convert_map[op_name](inputs, attrs, self._params, self._prelude) else: sym = convert_map[op_name](inputs, attrs, self._params, self._mod) From 27fb98a35f450d6b36d006e63cd90c80605872e1 Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Thu, 14 May 2020 20:42:05 +0530 Subject: [PATCH 24/55] Code Reorganization- First working iteration of positive spop test cases # Conflicts: # tests/python/frontend/tensorflow/test_forward.py --- .../frontend/tensorflow/test_forward.py | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index ec954fe6c109..7022c13ecf94 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3167,7 +3167,7 @@ def test_forward_isinf(): def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") -def _test_spop_placeholder_one(): +def test_spop_placeholder_one(): print("Inside placeholder function") tf.reset_default_graph() g = tf.Graph() @@ -3190,7 +3190,7 @@ def Forward(x,y): compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) -def _test_spop_placeholder_two(): +def test_spop_placeholder_two(): with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) @@ -3205,7 +3205,7 @@ def pl_with_default(pl): z = gen_functional_ops.StatefulPartitionedCall(args=[tpl], Tout=[tf.int32], f=pl_with_default) compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def _test_spop_placeholder_three(): +def test_spop_placeholder_three(): tf.disable_eager_execution() t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) @@ -3221,7 +3221,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def _test_spop_placeholder_four(): +def test_spop_placeholder_four(): tf.disable_eager_execution() t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) t2_data = np.array([[-2, 1, 2], [12, 
-2, 14], [12, -3, 4]], dtype=np.int32) @@ -3236,7 +3236,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def _test_spop_function_invocation(): +def test_spop_function_invocation(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3257,7 +3257,7 @@ def fun3(x,y): Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) -def _test_spop_arithmetic(): +def test_spop_arithmetic(): tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32]*3) @@ -3272,7 +3272,7 @@ def arithmetic(m,x,c): compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def _test_spop_control_flow(): +def test_spop_control_flow(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3291,7 +3291,7 @@ def Body1(x, y): op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), constant_op.constant(100.)], Tout=[dtypes.float32], f=Body1) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def _test_spop_variables(): +def test_spop_variables(): tf.reset_default_graph() g = tf.Graph() with g.as_default(): @@ -3308,7 +3308,7 @@ def Forward(x,y): z = gen_functional_ops.StatefulPartitionedCall(args=[var1,var2],Tout=[tf.int32], f=Forward) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") -def _test_spop_constants(): +def test_spop_constants(): tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 2) @@ -3322,26 +3322,26 @@ def constantsFn(x, y): compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def _test_spop_placeholder(): - _test_spop_placeholder_one() - _test_spop_placeholder_two() - _test_spop_placeholder_three() - _test_spop_placeholder_four() +def test_spop_placeholder(): + test_spop_placeholder_one() + test_spop_placeholder_two() + test_spop_placeholder_three() + test_spop_placeholder_four() -def test_forward_spop_positive(): - _test_spop_placeholder() - _test_spop_function_invocation() - _test_spop_arithmetic() - _test_spop_control_flow() - _test_spop_variables() - _test_spop_constants() +def test_spop_positive(): + test_spop_placeholder() + test_spop_function_invocation() + test_spop_arithmetic() + test_spop_control_flow() + test_spop_variables() + test_spop_constants() ####################################################################### # Main # ---- if __name__ == '__main__': # StatefulPartitionedOp - test_forward_spop_positive() + test_spop_positive() # Transforms test_forward_slice() test_forward_transpose() From 8589fec40297ad6b53e4e7fc68f9a65198ebe93c Mon Sep 17 00:00:00 2001 From: deepak Date: Thu, 14 May 2020 18:11:29 +0530 Subject: [PATCH 25/55] Code Reorganization- First working iteration of positive spop test cases --- .../frontend/tensorflow/test_forward.py | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7022c13ecf94..ec954fe6c109 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3167,7 +3167,7 @@ def test_forward_isinf(): def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") -def test_spop_placeholder_one(): +def 
_test_spop_placeholder_one(): print("Inside placeholder function") tf.reset_default_graph() g = tf.Graph() @@ -3190,7 +3190,7 @@ def Forward(x,y): compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) -def test_spop_placeholder_two(): +def _test_spop_placeholder_two(): with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) @@ -3205,7 +3205,7 @@ def pl_with_default(pl): z = gen_functional_ops.StatefulPartitionedCall(args=[tpl], Tout=[tf.int32], f=pl_with_default) compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_placeholder_three(): +def _test_spop_placeholder_three(): tf.disable_eager_execution() t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) @@ -3221,7 +3221,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def test_spop_placeholder_four(): +def _test_spop_placeholder_four(): tf.disable_eager_execution() t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) @@ -3236,7 +3236,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def test_spop_function_invocation(): +def _test_spop_function_invocation(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3257,7 +3257,7 @@ def fun3(x,y): Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) -def test_spop_arithmetic(): +def _test_spop_arithmetic(): tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32]*3) @@ -3272,7 +3272,7 @@ def arithmetic(m,x,c): compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_control_flow(): +def _test_spop_control_flow(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3291,7 +3291,7 @@ def Body1(x, y): op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), constant_op.constant(100.)], Tout=[dtypes.float32], f=Body1) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_variables(): +def _test_spop_variables(): tf.reset_default_graph() g = tf.Graph() with g.as_default(): @@ -3308,7 +3308,7 @@ def Forward(x,y): z = gen_functional_ops.StatefulPartitionedCall(args=[var1,var2],Tout=[tf.int32], f=Forward) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") -def test_spop_constants(): +def _test_spop_constants(): tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 2) @@ -3322,26 +3322,26 @@ def constantsFn(x, y): compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def test_spop_placeholder(): - test_spop_placeholder_one() - test_spop_placeholder_two() - test_spop_placeholder_three() - test_spop_placeholder_four() +def _test_spop_placeholder(): + _test_spop_placeholder_one() + _test_spop_placeholder_two() + _test_spop_placeholder_three() + _test_spop_placeholder_four() -def test_spop_positive(): - test_spop_placeholder() - test_spop_function_invocation() - test_spop_arithmetic() - 
test_spop_control_flow() - test_spop_variables() - test_spop_constants() +def test_forward_spop_positive(): + _test_spop_placeholder() + _test_spop_function_invocation() + _test_spop_arithmetic() + _test_spop_control_flow() + _test_spop_variables() + _test_spop_constants() ####################################################################### # Main # ---- if __name__ == '__main__': # StatefulPartitionedOp - test_spop_positive() + test_forward_spop_positive() # Transforms test_forward_slice() test_forward_transpose() From 0f637b4913ea496ac05b9fdb56981e38b33543c0 Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 15 May 2020 00:48:14 +0530 Subject: [PATCH 26/55] Function invocation more test cases --- .../frontend/tensorflow/test_forward.py | 94 ++++++++++++++++++- 1 file changed, 90 insertions(+), 4 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index ec954fe6c109..d72e77bbc093 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3168,7 +3168,6 @@ def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") def _test_spop_placeholder_one(): - print("Inside placeholder function") tf.reset_default_graph() g = tf.Graph() with g.as_default(): @@ -3212,8 +3211,6 @@ def _test_spop_placeholder_three(): t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) - # @tf.function(input_signature=[tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32), - # tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32)]) @tf.function def add(x, y): return tf.add(x, y, "add_t1_t2") @@ -3236,16 +3233,98 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def _test_spop_function_invocation(): +def _test_spop_function_invocation_one(): + tf.disable_eager_execution() + tf.reset_default_graph() + with tf.Graph().as_default(): + + def fun1(a): + return tf.multiply(a,a) + + def fun2(b): + return tf.multiply(b,10) + + @tf.function + def fun3(x,y): + x = fun2(x) + y = fun1(y) + z = tf.add(x,y) + return z + + t3 = fun3(tf.constant(10.5), tf.constant(20.4)) + + compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) + +def _test_spop_function_invocation_callable_graph(): + tf.disable_eager_execution() tf.reset_default_graph() with tf.Graph().as_default(): + @tf.function def fun1(a): return tf.multiply(a,a) + @tf.function def fun2(b): return tf.multiply(b,10) + @tf.function + def fun3(x,y): + x = fun2(x) + y = fun1(y) + z = tf.add(x,y) + return z + + t3 = fun3(tf.constant(10.5), tf.constant(20.4)) + + compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) + +def _test_spop_function_invocation_simple(): + tf.disable_eager_execution() + tf.reset_default_graph() + with tf.Graph().as_default(): + + @tf.function() + def fun2(): + return tf.constant(1) + + @tf.function() + def fun3(): + return fun2() + + t3 = fun3() + + compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) + +def _test_spop_function_invocation_params(): + tf.disable_eager_execution() + tf.reset_default_graph() + with tf.Graph().as_default(): + + @tf.function() + def fun2(x): + return tf.multiply(x,x) + + @tf.function() + def fun3(x): + y = fun2(x) + z = tf.add(x, y) + return z + + t3 = fun3(tf.constant(10)) + + compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) + +def 
_test_spop_function_invocation_defun(): + tf.reset_default_graph() + with tf.Graph().as_default(): + + def fun1(a): + return tf.multiply(a,a) + + def fun2(b): + return tf.multiply(b,b) + @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") def fun3(x,y): x = fun2(x) @@ -3328,6 +3407,13 @@ def _test_spop_placeholder(): _test_spop_placeholder_three() _test_spop_placeholder_four() +def _test_spop_function_invocation(): + _test_spop_function_invocation_simple() + _test_spop_function_invocation_params() + _test_spop_function_invocation_one() + _test_spop_function_invocation_callable_graph() + _test_spop_function_invocation_defun() + def test_forward_spop_positive(): _test_spop_placeholder() _test_spop_function_invocation() From b77cece20ced520fdd62c9906d259215ee3aadf0 Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 15 May 2020 07:38:14 +0530 Subject: [PATCH 27/55] Simplified & Merged different Function Invocation Test cases --- .../frontend/tensorflow/test_forward.py | 46 ++----------------- 1 file changed, 4 insertions(+), 42 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index d72e77bbc093..7e5c853fabbd 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3233,7 +3233,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def _test_spop_function_invocation_one(): +def _test_spop_function_invocation_basic(): tf.disable_eager_execution() tf.reset_default_graph() with tf.Graph().as_default(): @@ -3255,7 +3255,7 @@ def fun3(x,y): compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) -def _test_spop_function_invocation_callable_graph(): +def _test_spop_function_invocation_autograph(): tf.disable_eager_execution() tf.reset_default_graph() with tf.Graph().as_default(): @@ -3279,42 +3279,6 @@ def fun3(x,y): compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) -def _test_spop_function_invocation_simple(): - tf.disable_eager_execution() - tf.reset_default_graph() - with tf.Graph().as_default(): - - @tf.function() - def fun2(): - return tf.constant(1) - - @tf.function() - def fun3(): - return fun2() - - t3 = fun3() - - compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) - -def _test_spop_function_invocation_params(): - tf.disable_eager_execution() - tf.reset_default_graph() - with tf.Graph().as_default(): - - @tf.function() - def fun2(x): - return tf.multiply(x,x) - - @tf.function() - def fun3(x): - y = fun2(x) - z = tf.add(x, y) - return z - - t3 = fun3(tf.constant(10)) - - compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) - def _test_spop_function_invocation_defun(): tf.reset_default_graph() with tf.Graph().as_default(): @@ -3408,10 +3372,8 @@ def _test_spop_placeholder(): _test_spop_placeholder_four() def _test_spop_function_invocation(): - _test_spop_function_invocation_simple() - _test_spop_function_invocation_params() - _test_spop_function_invocation_one() - _test_spop_function_invocation_callable_graph() + _test_spop_function_invocation_basic() + _test_spop_function_invocation_autograph() _test_spop_function_invocation_defun() def test_forward_spop_positive(): From 6c71b72d4cf0e34ef32326a5cdc9fbedde9cdc7f Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Fri, 15 May 2020 15:35:40 +0530 Subject: [PATCH 28/55] support invocation of nested callables 
no need to explicitly handle partitioned and statefulPartitioned condition in convert_operator function
---
 python/tvm/relay/frontend/tensorflow.py | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 56424a3b7a47..071f4e9a1255 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -1931,30 +1931,37 @@ def _impl(inputs, attr, params, mod):
     return _impl
 
 def _partitioned_call():
-    def _impl(inputs, attr, params, mod, graph):
+    from tensorflow.python.framework import function_def_to_graph
+    from tensorflow.python.framework import ops
+
+    def _impl(inputs, attr, params, mod):
         node_func_name = attr.get('f').name
-        func = next((f for f in graph.library.function if f.signature.name == node_func_name), None)
-        if func:
+        outer_graph = ops.get_default_graph()
+        outer_graph_def = outer_graph.as_graph_def(add_shapes=True)
+
+        func = next((f for f in outer_graph_def.library.function if f.signature.name == node_func_name), None)
+        if func:
             # Convert function definition to graph
             func_input_shapes = func.attr["_input_shapes"].list.shape
-            subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes)
+            subgraph = function_def_to_graph.function_def_to_graph(func, func_input_shapes)
+            subgraph_def = subgraph.as_graph_def(add_shapes=True)
 
             # Computing subgraph's input shape dictionary
             subgraph_shape_dict = {}
             for f_arg, input in zip(func.signature.input_arg, inputs):
                 subgraph_shape_dict[f_arg.name] = _infer_shape(input)
 
-            # Construct relay nodes from the subgraph
+            # Construct relay nodes from the subgraph_def
             g = GraphProto()
-            mod, params = g.from_tensorflow(subgraph, shape=subgraph_shape_dict)
+            mod, params = g.from_tensorflow(subgraph_def, shape=subgraph_shape_dict)
             wl = tvm.relay.var('partitioned_call')
             sb = tvm.relay.scope_builder.ScopeBuilder()
             sb.let(wl, mod["main"])
             sb.ret(wl(*inputs))
             op = sb.get()
             return op
+
     return _impl
 
 def _stateful_partitioned_call():
@@ -3222,9 +3229,7 @@ def _convert_operator(self, op_name, inputs, attrs,
         if op_name in identity_list:
             sym = get_relay_op(op_name)(*inputs, **attrs)
         elif op_name in convert_map:
-            if op_name in ["PartitionedCall", "StatefulPartitionedCall"]:
-                sym = convert_map[op_name](inputs, attrs, self._params, self._mod, self._graph)
-            elif _need_prelude_for_shape_inference(op_name):
+            if _need_prelude_for_shape_inference(op_name):
                 sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)
             else:
                 sym = convert_map[op_name](inputs, attrs, self._params, self._mod)

From b253ac9daa44c92369967d823368b08489ea4a43 Mon Sep 17 00:00:00 2001
From: Prashant Sail
Date: Fri, 15 May 2020 15:37:08 +0530
Subject: [PATCH 29/55] Simplified and Uniform testcases

---
 .../frontend/tensorflow/test_forward.py | 84 +++++++++++--------
 1 file changed, 48 insertions(+), 36 deletions(-)

diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index ec954fe6c109..b7279dfa76b1 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -3168,10 +3168,7 @@ def test_forward_isfinite():
     _verify_infiniteness_ops(tf.is_finite, "isfinite")
 
 def _test_spop_placeholder_one():
-    print("Inside placeholder function")
-    tf.reset_default_graph()
-    g = tf.Graph()
-    with g.as_default():
+    with
tf.Graph().as_default(): @function.Defun(*[tf.int32]*2) def Forward(x,y): @@ -3191,7 +3188,6 @@ def Forward(x,y): ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) def _test_spop_placeholder_two(): - with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) dataVar = tf.Variable(data, shape=data.shape) @@ -3206,38 +3202,36 @@ def pl_with_default(pl): compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def _test_spop_placeholder_three(): - tf.disable_eager_execution() - t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") - t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) - t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") - t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + with tf.Graph().as_default(): + t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") + t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") + t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) - # @tf.function(input_signature=[tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32), - # tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32)]) - @tf.function - def add(x, y): - return tf.add(x, y, "add_t1_t2") + # @tf.function(input_signature=[tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32), + # tf.TensorSpec(shape=(3, 3, 3), dtype=tf.int32)]) + @tf.function + def add(x, y): + return tf.add(x, y, "add_t1_t2") - t3 = add(t1, t2) - compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) + t3 = add(t1, t2) + compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) def _test_spop_placeholder_four(): - tf.disable_eager_execution() - t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) - t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) - tf.reset_default_graph() - t1 = tf.placeholder(tf.int32, name="t1") - t2 = tf.placeholder(tf.int32, name="t2") + with tf.Graph().as_default(): + t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) + t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) + t1 = tf.placeholder(tf.int32, name="t1") + t2 = tf.placeholder(tf.int32, name="t2") - @tf.function - def add(x, y): - return tf.add(x, y, "add_t1_t2") + @tf.function + def add(x, y): + return tf.add(x, y, "add_t1_t2") - t3 = add(t1, t2) - compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) + t3 = add(t1, t2) + compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) def _test_spop_function_invocation(): - tf.reset_default_graph() with tf.Graph().as_default(): def fun1(a): @@ -3257,8 +3251,30 @@ def fun3(x,y): Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) +def _test_spop_function_invocation_2(): + with tf.Graph().as_default(): + t1 = tf.compat.v1.placeholder(tf.int32, (3, 3, 3), name="t1") + t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + t2 = tf.compat.v1.placeholder(tf.int32, name="t2") + t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + + @tf.function + def myfunc(x, y): + return tf.add(x, y, "myfunc") + + @tf.function + def myfunc2(x, y): + z = myfunc(x, y) + l = myfunc(z, y) + m = myfunc(l,z) + return tf.add(l, m, "myfunc2") + + res1 = myfunc(t1, t2) + res2 = myfunc2(res1, t1) 
+
+    compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [res2.name], mode='vm', init_global_variables=True)
+
 def _test_spop_arithmetic():
-    tf.reset_default_graph()
     with tf.Graph().as_default():
         @function.Defun(*[dtypes.int32]*3)
         def arithmetic(m,x,c):
@@ -3273,7 +3289,6 @@ def arithmetic(m,x,c):
     compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True)
 
 def _test_spop_control_flow():
-    tf.reset_default_graph()
     with tf.Graph().as_default():
 
         @function.Defun(*[dtypes.float32] * 2)
@@ -3292,10 +3307,7 @@ def Body1(x, y):
     compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True)
 
 def _test_spop_variables():
-    tf.reset_default_graph()
-    g = tf.Graph()
-    with g.as_default():
-
+    with tf.Graph().as_default():
         const1 = tf.constant(10)
         const2 = tf.constant(20)
         var1 = tf.Variable(const1, dtype=tf.int32)
@@ -3309,7 +3321,6 @@ def Forward(x,y):
     compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm")
 
 def _test_spop_constants():
-    tf.reset_default_graph()
     with tf.Graph().as_default():
         @function.Defun(*[dtypes.int32] * 2)
         def constantsFn(x, y):
@@ -3331,6 +3342,7 @@ def _test_spop_placeholder():
 def test_forward_spop_positive():
     _test_spop_placeholder()
     _test_spop_function_invocation()
+    _test_spop_function_invocation_2()
     _test_spop_arithmetic()
     _test_spop_control_flow()
     _test_spop_variables()

From 878313da05bccb0cc14f1059c85b1e0b640c07bc Mon Sep 17 00:00:00 2001
From: Prashant Sail
Date: Fri, 15 May 2020 15:35:40 +0530
Subject: [PATCH 30/55] support invocation of nested callables

no need to explicitly handle partitioned and statefulPartitioned condition in convert_operator function
---
 python/tvm/relay/frontend/tensorflow.py | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 56424a3b7a47..071f4e9a1255 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -1931,30 +1931,37 @@ def _impl(inputs, attr, params, mod):
     return _impl
 
 def _partitioned_call():
-    def _impl(inputs, attr, params, mod, graph):
+    from tensorflow.python.framework import function_def_to_graph
+    from tensorflow.python.framework import ops
+
+    def _impl(inputs, attr, params, mod):
         node_func_name = attr.get('f').name
-        func = next((f for f in graph.library.function if f.signature.name == node_func_name), None)
-        if func:
+        outer_graph = ops.get_default_graph()
+        outer_graph_def = outer_graph.as_graph_def(add_shapes=True)
+
+        func = next((f for f in outer_graph_def.library.function if f.signature.name == node_func_name), None)
+        if func:
             # Convert function definition to graph
             func_input_shapes = func.attr["_input_shapes"].list.shape
-            subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes)
+            subgraph = function_def_to_graph.function_def_to_graph(func, func_input_shapes)
+            subgraph_def = subgraph.as_graph_def(add_shapes=True)
 
             # Computing subgraph's input shape dictionary
             subgraph_shape_dict = {}
             for f_arg, input in zip(func.signature.input_arg, inputs):
                 subgraph_shape_dict[f_arg.name] = _infer_shape(input)
 
-            # Construct relay nodes from the subgraph
+            # Construct relay nodes from the subgraph_def
             g = GraphProto()
-            mod, params = g.from_tensorflow(subgraph, shape=subgraph_shape_dict)
+            mod, params = g.from_tensorflow(subgraph_def,
shape=subgraph_shape_dict) wl = tvm.relay.var('partitioned_call') sb = tvm.relay.scope_builder.ScopeBuilder() sb.let(wl, mod["main"]) sb.ret(wl(*inputs)) op = sb.get() return op + return _impl def _stateful_partitioned_call(): @@ -3222,9 +3229,7 @@ def _convert_operator(self, op_name, inputs, attrs, if op_name in identity_list: sym = get_relay_op(op_name)(*inputs, **attrs) elif op_name in convert_map: - if op_name in ["PartitionedCall", "StatefulPartitionedCall"]: - sym = convert_map[op_name](inputs, attrs, self._params, self._mod, self._graph) - elif _need_prelude_for_shape_inference(op_name): + if _need_prelude_for_shape_inference(op_name): sym = convert_map[op_name](inputs, attrs, self._params, self._prelude) else: sym = convert_map[op_name](inputs, attrs, self._params, self._mod) From ff11b3931d96a0bfe1bfea536effea04d127985c Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Fri, 15 May 2020 15:37:08 +0530 Subject: [PATCH 31/55] Simplified and Uniform testcases --- .../frontend/tensorflow/test_forward.py | 84 +++++++++++-------- 1 file changed, 47 insertions(+), 37 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7e5c853fabbd..7a6296bc0b7b 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3168,9 +3168,7 @@ def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") def _test_spop_placeholder_one(): - tf.reset_default_graph() - g = tf.Graph() - with g.as_default(): + with tf.Graph().as_default(): @function.Defun(*[tf.int32]*2) def Forward(x,y): @@ -3190,7 +3188,6 @@ def Forward(x,y): ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) def _test_spop_placeholder_two(): - with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) dataVar = tf.Variable(data, shape=data.shape) @@ -3205,37 +3202,35 @@ def pl_with_default(pl): compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def _test_spop_placeholder_three(): - tf.disable_eager_execution() - t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") - t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) - t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") - t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + with tf.Graph().as_default(): + t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") + t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2") + t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) - @tf.function - def add(x, y): - return tf.add(x, y, "add_t1_t2") + @tf.function + def add(x, y): + return tf.add(x, y, "add_t1_t2") - t3 = add(t1, t2) - compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) + t3 = add(t1, t2) + compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) def _test_spop_placeholder_four(): - tf.disable_eager_execution() - t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) - t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) - tf.reset_default_graph() - t1 = tf.placeholder(tf.int32, name="t1") - t2 = tf.placeholder(tf.int32, name="t2") + with tf.Graph().as_default(): + t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) + t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) + t1 = 
tf.placeholder(tf.int32, name="t1") + t2 = tf.placeholder(tf.int32, name="t2") + + @tf.function + def add(x, y): + return tf.add(x, y, "add_t1_t2") - @tf.function - def add(x, y): - return tf.add(x, y, "add_t1_t2") + t3 = add(t1, t2) + compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) - t3 = add(t1, t2) - compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) def _test_spop_function_invocation_basic(): - tf.disable_eager_execution() - tf.reset_default_graph() with tf.Graph().as_default(): def fun1(a): @@ -3255,9 +3250,30 @@ def fun3(x,y): compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) +def _test_spop_function_invocation_basic2(): + with tf.Graph().as_default(): + t1 = tf.compat.v1.placeholder(tf.int32, (3, 3, 3), name="t1") + t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + t2 = tf.compat.v1.placeholder(tf.int32, name="t2") + t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) + + @tf.function + def myfunc(x, y): + return tf.add(x, y, "myfunc") + + @tf.function + def myfunc2(x, y): + z = myfunc(x, y) + l = myfunc(z, y) + m = myfunc(l,z) + return tf.add(l, m, "myfunc2") + + res1 = myfunc(t1, t2) + res2 = myfunc2(res1, t1) + + compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [res2.name], mode='vm', init_global_variables=True) + def _test_spop_function_invocation_autograph(): - tf.disable_eager_execution() - tf.reset_default_graph() with tf.Graph().as_default(): @tf.function @@ -3280,7 +3296,6 @@ def fun3(x,y): compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) def _test_spop_function_invocation_defun(): - tf.reset_default_graph() with tf.Graph().as_default(): def fun1(a): @@ -3301,7 +3316,6 @@ def fun3(x,y): compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) def _test_spop_arithmetic(): - tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32]*3) def arithmetic(m,x,c): @@ -3316,7 +3330,6 @@ def arithmetic(m,x,c): compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def _test_spop_control_flow(): - tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.float32] * 2) @@ -3335,10 +3348,7 @@ def Body1(x, y): compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def _test_spop_variables(): - tf.reset_default_graph() - g = tf.Graph() - with g.as_default(): - + with tf.Graph().as_default(): const1 = tf.constant(10) const2 = tf.constant(20) var1 = tf.Variable(const1, dtype=tf.int32) @@ -3352,7 +3362,6 @@ def Forward(x,y): compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") def _test_spop_constants(): - tf.reset_default_graph() with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 2) def constantsFn(x, y): @@ -3373,6 +3382,7 @@ def _test_spop_placeholder(): def _test_spop_function_invocation(): _test_spop_function_invocation_basic() + _test_spop_function_invocation_basic2() _test_spop_function_invocation_autograph() _test_spop_function_invocation_defun() From 14982af16b386e6293a723f00a0977921dbac61b Mon Sep 17 00:00:00 2001 From: Prashant Sail Date: Fri, 15 May 2020 16:05:30 +0530 Subject: [PATCH 32/55] removed duplicate and renamed testcase --- .../frontend/tensorflow/test_forward.py | 28 ++----------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git 
a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index 05057bac477c..0bd18a48f810 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -3249,7 +3249,7 @@ def fun3(x,y):

     compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True)

-def _test_spop_function_invocation_basic2():
+def _test_spop_function_invocation_nested():
     with tf.Graph().as_default():
         t1 = tf.compat.v1.placeholder(tf.int32, (3, 3, 3), name="t1")
         t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
         t2 = tf.compat.v1.placeholder(tf.int32, name="t2")
@@ -3314,29 +3314,6 @@ def fun3(x,y):
                                                    Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation")
     compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True)

-def _test_spop_function_invocation_2():
-    with tf.Graph().as_default():
-        t1 = tf.compat.v1.placeholder(tf.int32, (3, 3, 3), name="t1")
-        t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
-        t2 = tf.compat.v1.placeholder(tf.int32, name="t2")
-        t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
-
-        @tf.function
-        def myfunc(x, y):
-            return tf.add(x, y, "myfunc")
-
-        @tf.function
-        def myfunc2(x, y):
-            z = myfunc(x, y)
-            l = myfunc(z, y)
-            m = myfunc(l,z)
-            return tf.add(l, m, "myfunc2")
-
-        res1 = myfunc(t1, t2)
-        res2 = myfunc2(res1, t1)
-
-        compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [res2.name], mode='vm', init_global_variables=True)
-
 def _test_spop_arithmetic():
     with tf.Graph().as_default():
         @function.Defun(*[dtypes.int32]*3)
@@ -3404,14 +3381,13 @@ def _test_spop_placeholder():

 def _test_spop_function_invocation():
     _test_spop_function_invocation_basic()
-    _test_spop_function_invocation_basic2()
+    _test_spop_function_invocation_nested()
     _test_spop_function_invocation_autograph()
     _test_spop_function_invocation_defun()

 def test_forward_spop_positive():
     _test_spop_placeholder()
     _test_spop_function_invocation()
-    _test_spop_function_invocation_2()
     _test_spop_arithmetic()
     _test_spop_control_flow()
     _test_spop_variables()

From 332e2f07472a60d783e8426674902f61bd010a14 Mon Sep 17 00:00:00 2001
From: deepak
Date: Fri, 15 May 2020 16:24:48 +0530
Subject: [PATCH 33/55] Negative scenario added for testing operator
 statefulness. The only exceptions among stateful operators are
 PartitionedCall & StatefulPartitionedCall, which have the capability to
 execute even stateless operators within them

---
 python/tvm/relay/frontend/tensorflow.py       |   9 ++
 .../frontend/tensorflow/test_forward.py       | 120 ++++++++++++++++++
 2 files changed, 129 insertions(+)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 56424a3b7a47..0a58441676c9 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -2931,6 +2931,12 @@ def _parse_import_prerequisites(self, graph):
         """
         missing_operators = set()
         for node in graph.node:
+            try:
+                from tensorflow.python.framework import op_def_registry
+            except ImportError as e:
+                raise ImportError(
+                    "Unable to import tensorflow which is required {}".format(e))
+            op_def = op_def_registry._registered_ops.get(node.op)
             if node.op == "Placeholder" or node.op == 'PlaceholderWithDefault':
                 pass
             elif node.op == "Const":
                 pass
@@ -2940,6 +2946,9 @@ def _parse_import_prerequisites(self, graph):
                            _convert_map_rnn,
                            _control_flow_nodes]]):
                 pass
+            elif op_def is not None and op_def.is_stateful:
+                raise Exception("Found {} stateful operator in this graph. 
" + "Rejecting the graph as TVM does not support stateful operations ".format(node.op)) else: missing_operators.add(node.op) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7e5c853fabbd..97f31c1de04b 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -40,6 +40,7 @@ from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.ops import gen_functional_ops +from tensorflow.python.framework import op_def_registry from distutils.version import LooseVersion import tvm from tvm import te @@ -3376,6 +3377,117 @@ def _test_spop_function_invocation(): _test_spop_function_invocation_autograph() _test_spop_function_invocation_defun() +def _get_attr(self, buf): + """Returns the value of the attr of this buf with the given `name`. + + Args: + buf: attrvalue protobuf. + + Returns: + The value of the attr, as a Python object. + + Raises: + ValueError: If this op does not have an attr with the given `name`. + """ + fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"] + + x = buf + + ret = [] + + try: + from tensorflow.python.framework import dtypes + except ImportError as e: + raise ImportError( + "Unable to import tensorflow which is required {}".format(e)) + + # Treat an empty oneof value as an empty list. + if not x.WhichOneof("value"): + return ret + if x.HasField("list"): + for f in fields: + if getattr(x.list, f): + if f == "type": + ret += [dtypes.as_dtype(x) for x in list(getattr(x.list, f))] + else: + ret += list(getattr(x.list, f)) + else: + for f in fields: + if x.HasField(f): + if f == "type": + ret = dtypes.as_dtype(getattr(x, f)) + else: + ret = getattr(x, f) + return ret + +def _test_spop_stateful_test(): + + tf.reset_default_graph() + with tf.Graph().as_default(): + + @tf.function + def FunctionWithStatefulOp_One(i): + b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10) + y = tf.multiply(b, i) + return y + + @tf.function + def FunctionWithStatefulOp(m, n): + a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed = 10) + x = tf.multiply(a,m) + y = FunctionWithStatefulOp_One(n) + z = tf.multiply(x,y) + return z + + op = FunctionWithStatefulOp(constant_op.constant(1.), constant_op.constant(2.)) + compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm") + +def _test_spop_device_assignment(): + + tf.reset_default_graph() + with tf.Graph().as_default(): + + def fun1(a): + with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"): + return tf.multiply(a,a) + + def fun2(b): + with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"): + return tf.multiply(b,b) + + @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") + def fun3(x,y): + with ops.device("/GPU:2"): + x = fun2(x) + y = fun1(y) + z = tf.add(x,y) + return z + + op = gen_functional_ops.StatefulPartitionedCall(args=[tf.constant(10.5),tf.constant(20.4)], + Tout=[dtypes.float32], f=fun3) + try: + from tensorflow.core.protobuf import config_pb2 + except ImportError as e: + raise ImportError( + "Unable to import tensorflow which is required {}".format(e)) + + run_options = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) + run_metadata = config_pb2.RunMetadata() + with tf.Session(config=config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 2})) as sess: + sess.run(tf.global_variables_initializer()) + print("The output of device assignment run 
is = ", + sess.run(op, options=run_options, run_metadata=run_metadata)) + assignedDevicesSet = set() + for func in run_metadata.step_stats.dev_stats: + print("device used: ", repr(func.device)) + assignedDevicesSet.add(func.device) + if (len(assignedDevicesSet) > 1): + raise Exception("Device assignment is not consistent. Rejecting the graph") + compare_tf_with_tvm([],[], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + +def _test_spop_resource_variables_test(): + pass + def test_forward_spop_positive(): _test_spop_placeholder() _test_spop_function_invocation() @@ -3384,12 +3496,20 @@ def test_forward_spop_positive(): _test_spop_variables() _test_spop_constants() +def test_forward_spop_negative(): + #Uncomment the following test case to test that TVM rejects any TF stateful operations + # except StatefulPartitionedCall/PartitionedCall(as these two operators can still be used + # as container graphs to execute "stateless" operations internally + # _test_spop_stateful_test() + _test_spop_device_assignment() + _test_spop_resource_variables_test() ####################################################################### # Main # ---- if __name__ == '__main__': # StatefulPartitionedOp test_forward_spop_positive() + test_forward_spop_negative() # Transforms test_forward_slice() test_forward_transpose() From 7ea2a6645d838d458141bf5eeebdc8a613a90e0f Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 15 May 2020 16:32:14 +0530 Subject: [PATCH 34/55] Miscellaneous reorganization changes for spop scenarios --- .../frontend/tensorflow/test_forward.py | 58 ++++--------------- 1 file changed, 11 insertions(+), 47 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index f4b7ce1803ea..2cbc893772cc 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3252,9 +3252,9 @@ def fun3(x,y): def _test_spop_function_invocation_nested(): with tf.Graph().as_default(): - t1 = tf.compat.v1.placeholder(tf.int32, (3, 3, 3), name="t1") + t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1") t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) - t2 = tf.compat.v1.placeholder(tf.int32, name="t2") + t2 = tf.placeholder(tf.int32, name="t2") t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) @tf.function @@ -3386,49 +3386,6 @@ def _test_spop_function_invocation(): _test_spop_function_invocation_autograph() _test_spop_function_invocation_defun() -def _get_attr(self, buf): - """Returns the value of the attr of this buf with the given `name`. - - Args: - buf: attrvalue protobuf. - - Returns: - The value of the attr, as a Python object. - - Raises: - ValueError: If this op does not have an attr with the given `name`. - """ - fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"] - - x = buf - - ret = [] - - try: - from tensorflow.python.framework import dtypes - except ImportError as e: - raise ImportError( - "Unable to import tensorflow which is required {}".format(e)) - - # Treat an empty oneof value as an empty list. 
- if not x.WhichOneof("value"): - return ret - if x.HasField("list"): - for f in fields: - if getattr(x.list, f): - if f == "type": - ret += [dtypes.as_dtype(x) for x in list(getattr(x.list, f))] - else: - ret += list(getattr(x.list, f)) - else: - for f in fields: - if x.HasField(f): - if f == "type": - ret = dtypes.as_dtype(getattr(x, f)) - else: - ret = getattr(x, f) - return ret - def _test_spop_stateful_test(): tf.reset_default_graph() @@ -3497,7 +3454,7 @@ def fun3(x,y): def _test_spop_resource_variables_test(): pass -def test_forward_spop_positive(): +def _test_forward_spop_positive(): _test_spop_placeholder() _test_spop_function_invocation() _test_spop_arithmetic() @@ -3505,18 +3462,25 @@ def test_forward_spop_positive(): _test_spop_variables() _test_spop_constants() -def test_forward_spop_negative(): +def _test_forward_spop_negative(): #Uncomment the following test case to test that TVM rejects any TF stateful operations # except StatefulPartitionedCall/PartitionedCall(as these two operators can still be used # as container graphs to execute "stateless" operations internally # _test_spop_stateful_test() _test_spop_device_assignment() _test_spop_resource_variables_test() + +def test_forward_spop(): + _test_forward_spop_positive() + _test_forward_spop_negative() + + ####################################################################### # Main # ---- if __name__ == '__main__': # StatefulPartitionedOp + test_forward_spop() test_forward_spop_positive() test_forward_spop_negative() # Transforms From da79752556587ea97d4711209376778aef4ebb9f Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 15 May 2020 16:48:14 +0530 Subject: [PATCH 35/55] Miscellaneous reorganization changes for spop scenarios --- tests/python/frontend/tensorflow/test_forward.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 2cbc893772cc..c3c1767cfbfd 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3414,18 +3414,20 @@ def _test_spop_device_assignment(): with tf.Graph().as_default(): def fun1(a): - with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"): + with ops.device("/GPU:0"): return tf.multiply(a,a) def fun2(b): - with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"): + with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"): return tf.multiply(b,b) @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3") def fun3(x,y): - with ops.device("/GPU:2"): + with ops.device("/CPU:0"): x = fun2(x) + with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"): y = fun1(y) + with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"): z = tf.add(x,y) return z @@ -3439,7 +3441,7 @@ def fun3(x,y): run_options = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() - with tf.Session(config=config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 2})) as sess: + with tf.Session(config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 2})) as sess: sess.run(tf.global_variables_initializer()) print("The output of device assignment run is = ", sess.run(op, options=run_options, run_metadata=run_metadata)) @@ -3448,6 +3450,7 @@ def fun3(x,y): print("device used: ", repr(func.device)) assignedDevicesSet.add(func.device) if (len(assignedDevicesSet) > 1): + print("no of devices used are: ",len(assignedDevicesSet)) raise 
Exception("Device assignment is not consistent. Rejecting the graph")
     compare_tf_with_tvm([],[], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True)

From b3666ce474993d9c8c57517bebb9585a83f86ba0 Mon Sep 17 00:00:00 2001
From: deepak
Date: Fri, 15 May 2020 17:18:16 +0530
Subject: [PATCH 36/55] Import the TensorFlow modules safely using try/except,
 plus other code reorganization

---
 python/tvm/relay/frontend/tensorflow.py          | 8 ++++++--
 tests/python/frontend/tensorflow/test_forward.py | 8 ++++----
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 2a2bed55d329..df45f290599d 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -1931,8 +1931,12 @@ def _impl(inputs, attr, params, mod):
     return _impl

 def _partitioned_call():
-    from tensorflow.python.framework import function_def_to_graph
-    from tensorflow.python.framework import ops
+    try:
+        from tensorflow.python.framework import function_def_to_graph
+        from tensorflow.python.framework import ops
+    except ImportError as e:
+        raise ImportError(
+            "Unable to import tensorflow which is required {}".format(e))

     def _impl(inputs, attr, params, mod):
         node_func_name = attr.get('f').name
diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index c3c1767cfbfd..fc576542c306 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -3386,7 +3386,7 @@ def _test_spop_function_invocation():
     _test_spop_function_invocation_autograph()
     _test_spop_function_invocation_defun()

-def _test_spop_stateful_test():
+def _test_spop_stateful():

     tf.reset_default_graph()
     with tf.Graph().as_default():
@@ -3454,7 +3454,7 @@ def fun3(x,y):
             raise Exception("Device assignment is not consistent. 
Rejecting the graph") compare_tf_with_tvm([],[], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def _test_spop_resource_variables_test(): +def _test_spop_resource_variables(): pass def _test_forward_spop_positive(): @@ -3469,9 +3469,9 @@ def _test_forward_spop_negative(): #Uncomment the following test case to test that TVM rejects any TF stateful operations # except StatefulPartitionedCall/PartitionedCall(as these two operators can still be used # as container graphs to execute "stateless" operations internally - # _test_spop_stateful_test() + # _test_spop_stateful() _test_spop_device_assignment() - _test_spop_resource_variables_test() + _test_spop_resource_variables() def test_forward_spop(): _test_forward_spop_positive() From caaf8fd3e8b25595fa8efe87ded50bfcb817f8cf Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 15 May 2020 19:42:44 +0530 Subject: [PATCH 37/55] Negative scenario for resource variables handled --- .../frontend/tensorflow/test_forward.py | 46 ++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index fc576542c306..dbaea477bbb0 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3441,7 +3441,7 @@ def fun3(x,y): run_options = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() - with tf.Session(config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 2})) as sess: + with tf.Session() as sess: sess.run(tf.global_variables_initializer()) print("The output of device assignment run is = ", sess.run(op, options=run_options, run_metadata=run_metadata)) @@ -3455,7 +3455,47 @@ def fun3(x,y): compare_tf_with_tvm([],[], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def _test_spop_resource_variables(): - pass + tf.reset_default_graph() + with tf.Graph().as_default(): + + const1 = tf.constant(10) + const2 = tf.constant(20) + var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True) + var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True) + + @tf.function + def resourceVariablesTest(x, y): + return tf.multiply(x, y) + + op = resourceVariablesTest(var1,var2) + + def isResourceVariable(var): + try: + from tensorflow.python.ops.variables import RefVariable + from tensorflow.python.ops.resource_variable_ops import ResourceVariable + except ImportError as e: + raise ImportError( + "Unable to import tensorflow which is required {}".format(e)) + return bool(not issubclass(var.__class__,RefVariable) + and issubclass(var.__class__,ResourceVariable)) + + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + sess.run(op) + mylist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + resVarFound, resVarCnt = False, 0 + for var in mylist: + if isResourceVariable(var): + resVarFound = True + resVarCnt += 1 + print(resVarFound) + for var in mylist: + print(var, var.__class__) + if(resVarCnt > 0): + print("Graph contains {} many resource variables ".format(resVarCnt)) + raise Exception("Graph contains Resource variables, so rejecting the graph") + + compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) def _test_forward_spop_positive(): _test_spop_placeholder() @@ -3471,6 +3511,8 @@ def _test_forward_spop_negative(): # as container graphs to execute "stateless" operations internally # _test_spop_stateful() 
_test_spop_device_assignment() + #Uncomment the following test case to test that TVM rejects any graph containing resource variables with + #StatefulPartitionedOp _test_spop_resource_variables() def test_forward_spop(): From a525666733947653882ebf891d90e148924ef6d8 Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 15 May 2020 20:11:18 +0530 Subject: [PATCH 38/55] Documentation update for code --- tests/python/frontend/tensorflow/test_forward.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index dbaea477bbb0..40114ba8850b 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3508,12 +3508,17 @@ def _test_forward_spop_positive(): def _test_forward_spop_negative(): #Uncomment the following test case to test that TVM rejects any TF stateful operations # except StatefulPartitionedCall/PartitionedCall(as these two operators can still be used - # as container graphs to execute "stateless" operations internally + # as container graphs to execute "stateless" operations internally. # _test_spop_stateful() + + # Uncomment the following test case to test that TVM rejects inconsistent device assignment + # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will + # be used as container graphs to internally execute "stateless" operations. _test_spop_device_assignment() + #Uncomment the following test case to test that TVM rejects any graph containing resource variables with - #StatefulPartitionedOp - _test_spop_resource_variables() + #StatefulPartitionedOp. + # _test_spop_resource_variables() def test_forward_spop(): _test_forward_spop_positive() From 790c02649e1175c8c3149e344263109fe446933e Mon Sep 17 00:00:00 2001 From: maheshambule Date: Fri, 15 May 2020 22:41:52 +0530 Subject: [PATCH 39/55] SPOP change in function handling --- python/tvm/relay/frontend/tensorflow.py | 90 +++++++++++-------- .../frontend/tensorflow/test_forward.py | 8 +- 2 files changed, 59 insertions(+), 39 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 56424a3b7a47..4d08a82e7126 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -1930,37 +1930,6 @@ def _impl(inputs, attr, params, mod): return _res return _impl -def _partitioned_call(): - def _impl(inputs, attr, params, mod, graph): - node_func_name = attr.get('f').name - func = next((f for f in graph.library.function if f.signature.name == node_func_name), None) - if func: - from tensorflow.python.framework import function_def_to_graph - - # Convert function definition to graph - func_input_shapes = func.attr["_input_shapes"].list.shape - subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes) - - # Computing subgraph's input shape dictionary - subgraph_shape_dict = {} - for f_arg, input in zip(func.signature.input_arg, inputs): - subgraph_shape_dict[f_arg.name] = _infer_shape(input) - - # Construct relay nodes from the subgraph - g = GraphProto() - mod, params = g.from_tensorflow(subgraph, shape=subgraph_shape_dict) - wl = tvm.relay.var('partitioned_call') - sb = tvm.relay.scope_builder.ScopeBuilder() - sb.let(wl, mod["main"]) - sb.ret(wl(*inputs)) - op = sb.get() - return op - return _impl - -def _stateful_partitioned_call(): - return _partitioned_call() - - # compatible operators that do NOT 
require any conversion.
 _identity_list = []

@@ -2064,7 +2033,6 @@ def _stateful_partitioned_call():
     'NotEqual'                          : _broadcast('not_equal'),
     'OneHot'                            : _one_hot(),
     'Pack'                              : _pack(),
-    'PartitionedCall'                   : _partitioned_call(),
     'Pad'                               : _pad('Pad'),
     'PadV2'                             : _pad('PadV2'),
     'Pow'                               : _elemwise('power'),
@@ -2101,7 +2069,6 @@ def _stateful_partitioned_call():
     'Square'                            : _square(),
     'SquaredDifference'                 : _squared_difference(),
     'Squeeze'                           : _squeeze(),
-    'StatefulPartitionedCall'           : _stateful_partitioned_call(),
     'StopGradient'                      : _identity(),
     'StridedSlice'                      : _stridedSlice(),
     'Sub'                               : _elemwise('subtract'),
@@ -2935,6 +2902,8 @@ def _parse_import_prerequisites(self, graph):
                 pass
             elif node.op == "Const":
                 pass
+            elif node.op in ["PartitionedCall", "StatefulPartitionedCall"]:
+                pass
             else:
                 if any([node.op in t for t in [_identity_list, _convert_map,
                                                _convert_map_rnn,
@@ -3190,6 +3159,54 @@ def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_

         return op

+    def _partition_call_operator(self, inputs, attr):
+        node_func_name = attr.get('f').name
+        func = next((f for f in self._graph.library.function if f.signature.name == node_func_name), None)
+        if func:
+            from tensorflow.python.framework import function_def_to_graph
+
+            # Convert function definition to graph
+            func_input_shapes = func.attr["_input_shapes"].list.shape
+            subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes)
+
+            # Computing subgraph's input shape dictionary
+            subgraph_shape_dict, input_expr_dict = {}, {}
+            for f_arg, input in zip(func.signature.input_arg, inputs):
+                subgraph_shape_dict[f_arg.name] = _infer_shape(input)
+                input_expr_dict[f_arg.name] = input
+
+            # Construct relay nodes from the subgraph
+            g = GraphProto()
+            sub_mod, sub_params = g.from_tensorflow(subgraph, shape=subgraph_shape_dict)
+            self._params.update(sub_params)
+
+            param_exprs = []
+            for param_expr in sub_mod["main"].params:
+                # sub_params is subset of mod["main"].params
+                param_name = param_expr.vid.name_hint
+                if param_name in sub_params.keys():
+                    param_exprs.append(param_expr)
+                elif param_name in input_expr_dict.keys():
+                    param_exprs.append(input_expr_dict[param_name])
+                else:
+                    raise Exception("Input parameter {} not found".format(param_name))
+
+            sb = tvm.relay.scope_builder.ScopeBuilder()
+            func_expr = _function.Function(sub_mod["main"].params, sub_mod["main"].body)
+            global_func = tvm.relay.GlobalVar('func_{}'.format(func.signature.name))
+            self._mod[global_func] = func_expr
+            loop_ret = global_func(*param_exprs)
+            sb.ret(loop_ret)
+            ret = sb.get()
+
+            return ret
+
     def _convert_operator(self, op_name, inputs, attrs,
                           graph, identity_list=None, convert_map=None):
         """Convert from Tensorflow operator to relay operator. 
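For reference, the module-level function pattern built above can be reproduced in isolation. A minimal sketch (illustrative names, not code from this patch) of lifting a subgraph into a Relay global function and calling it through a GlobalVar, as _partition_call_operator does:

    import tvm
    from tvm import relay

    mod = tvm.IRModule()

    # the lifted subgraph: a standalone Relay function, analogous to the
    # Function built from sub_mod["main"].params / .body above
    x = relay.var("x", shape=(2, 2), dtype="float32")
    y = relay.var("y", shape=(2, 2), dtype="float32")
    gv = relay.GlobalVar("func_add")
    mod[gv] = relay.Function([x, y], relay.add(x, y))

    # the call site: a ScopeBuilder wrapping one call to the global function,
    # mirroring global_func(*param_exprs) in the converter
    a = relay.var("a", shape=(2, 2), dtype="float32")
    b = relay.var("b", shape=(2, 2), dtype="float32")
    sb = relay.scope_builder.ScopeBuilder()
    sb.ret(gv(a, b))
    mod["main"] = relay.Function([a, b], sb.get())
    print(mod)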
@@ -3222,9 +3239,7 @@ def _convert_operator(self, op_name, inputs, attrs, if op_name in identity_list: sym = get_relay_op(op_name)(*inputs, **attrs) elif op_name in convert_map: - if op_name in ["PartitionedCall", "StatefulPartitionedCall"]: - sym = convert_map[op_name](inputs, attrs, self._params, self._mod, self._graph) - elif _need_prelude_for_shape_inference(op_name): + if _need_prelude_for_shape_inference(op_name): sym = convert_map[op_name](inputs, attrs, self._params, self._prelude) else: sym = convert_map[op_name](inputs, attrs, self._params, self._mod) @@ -3233,6 +3248,9 @@ def _convert_operator(self, op_name, inputs, attrs, sym = self._convert_rnn_operator(op_name, inputs, attrs, self._params, graph, convert_map_rnn) + + elif op_name in ["PartitionedCall", "StatefulPartitionedCall"]: + sym = self._partition_call_operator(inputs, attrs) else: raise NotImplementedError("Operator {} not implemented.".format(op_name)) return sym diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7e5c853fabbd..c6aecaa97b13 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3356,15 +3356,17 @@ def _test_spop_constants(): with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 2) def constantsFn(x, y): - z = tf.add(x,y) + vv = tf.constant([2, 3, 4], name="vv") + z = tf.add(vv + x, y) return z - a = tf.constant(20, name = "a") - b = tf.constant(40, name = "b") + a = tf.constant(20000, name = "a") + b = tf.constant(40000, name = "b") spopFn = gen_functional_ops.StatefulPartitionedCall(args=[a, b], Tout=[tf.int32], f=constantsFn) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + def _test_spop_placeholder(): _test_spop_placeholder_one() _test_spop_placeholder_two() From 5e92663c99215af9af49563a2efdcb96280ff771 Mon Sep 17 00:00:00 2001 From: maheshambule Date: Mon, 18 May 2020 16:02:11 +0530 Subject: [PATCH 40/55] handle nested subgraph --- python/tvm/relay/frontend/tensorflow.py | 79 ++++++++++++++++++------- 1 file changed, 59 insertions(+), 20 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index ee46c8e5ea7d..1a48e5f24c19 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -16,7 +16,7 @@ # specific language governing permissions and limitations # under the License. # pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except -# pylint: disable=import-outside-toplevel +# pylint: disable=import-outside-toplevel, redefined-builtin """TF: Tensorflow frontend.""" import warnings from collections import defaultdict @@ -45,6 +45,7 @@ __all__ = ['from_tensorflow'] + def _get_pad_pair(input1d, kernel1d, stride1d): if input1d % stride1d == 0: pad = max(kernel1d - stride1d, 0) @@ -2719,8 +2720,9 @@ def __init__(self): self._loop_var_order = {} self._hash2tfnode = {} self._while_loop_name_set = set() + self.main = self - def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): + def _get_func(self, graph, layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. Follow the tensorflow graph definition to parse and convert it to Relay. 
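Aside: the FunctionDef-to-GraphDef step this series builds on can be exercised on its own. A minimal sketch, assuming TF 1.x graph mode as used throughout these tests (names are illustrative):

    import tensorflow as tf
    from tensorflow.python.framework import function_def_to_graph

    with tf.Graph().as_default() as g:
        t1 = tf.placeholder(tf.float32, (2, 2), name="t1")

        @tf.function
        def double(x):
            return tf.add(x, x, "double")

        res = double(t1)  # traces `double` into graph.library as a FunctionDef
        graph_def = g.as_graph_def(add_shapes=True)

    fdef = graph_def.library.function[0]
    input_shapes = fdef.attr["_input_shapes"].list.shape
    subgraph_def, _ = function_def_to_graph.function_def_to_graph_def(fdef, input_shapes)
    print([n.op for n in subgraph_def.node])  # the body of `double` as a plain GraphDef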
@@ -2887,8 +2889,14 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): out = out[0] if len(out) == 1 else _expr.Tuple(out) func = _function.Function(analysis.free_vars(out), out) - self._mod["main"] = func - return self._mod, self._params + return func + + + def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): + func = self._get_func(graph, layout=layout, shape=shape, outputs=outputs) + self._mod["main"] = func + return self._mod, self._params + def _parse_import_prerequisites(self, graph): """ Calculate the named preconditions from TensorFlow `graph`. @@ -3169,42 +3177,59 @@ def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_ return op def _partition_call_operator(self, inputs, attr): - from tensorflow.python.framework import function_def_to_graph + try: + from tensorflow.python.framework import function_def_to_graph + from tensorflow.python.framework import ops + except ImportError as e: + raise ImportError( + "Unable to import tensorflow which is required {}".format(e)) + + main_graph = self.main node_func_name = attr.get('f').name - func = next((f for f in self._graph.library.function if f.signature.name == node_func_name), None) + outer_graph = ops.get_default_graph() + outer_graph_def = outer_graph.as_graph_def(add_shapes=True) + func = next((f for f in outer_graph_def.library.function + if f.signature.name == node_func_name), None) if func: # Convert function definition to graph func_input_shapes = func.attr["_input_shapes"].list.shape - subgraph, flat_tensor_name = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes) + subgraph, _ = function_def_to_graph.\ + function_def_to_graph_def(func, func_input_shapes) # Computing subgraph's input shape dictionary subgraph_shape_dict, input_expr_dict = {}, {} for f_arg, input in zip(func.signature.input_arg, inputs): - subgraph_shape_dict[f_arg.name] = _infer_shape(input) input_expr_dict[f_arg.name] = input + subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph._mod) # Construct relay nodes from the subgraph - g = GraphProto() - sub_mod, sub_params = g.from_tensorflow(subgraph, shape=subgraph_shape_dict) - self._params.update(sub_params) + g1 = SubGraphProto(self.main) + sub_func, sub_params = g1.from_tensorflow(subgraph, shape=subgraph_shape_dict) + self.main._params.update(sub_params) param_exprs = [] - for param_expr in sub_mod["main"].params: - # sub_params is subset of mod["main"].params + for param_expr in sub_func.params: + # sub_params is subset of sub_func.params param_name = param_expr.vid.name_hint - if param_name in sub_params.keys(): - param_exprs.append(param_expr) - elif param_name in input_expr_dict.keys(): + if param_name in input_expr_dict.keys(): param_exprs.append(input_expr_dict[param_name]) + elif param_name in sub_params.keys(): + param_exprs.append(param_expr) else: raise Exception("Input parameter {} not found".format(param_name)) sb = tvm.relay.scope_builder.ScopeBuilder() - func_expr = _function.Function(sub_mod["main"].params, sub_mod["main"].body) + func_expr = _function.Function(sub_func.params, sub_func.body) + func_name = 'func_{}'.format(func.signature.name) + + try: + global_func = main_graph._mod[func_name] + except ValueError: + import traceback + global_func = tvm.relay.GlobalVar(func_name) + main_graph._mod[global_func] = func_expr - global_func = tvm.relay.GlobalVar('func_{}'.format(func.signature.name)) - self._mod[global_func] = func_expr loop_ret = global_func(*param_exprs) sb.ret(loop_ret) 
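+            # sb.get() packages the single call into the lifted global
+            # function; that call expression becomes this node's converted op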
ret = sb.get() @@ -3254,7 +3279,7 @@ def _convert_operator(self, op_name, inputs, attrs, convert_map_rnn) elif op_name in ["PartitionedCall", "StatefulPartitionedCall"]: - sym = self._partition_call_operator(inputs, attrs) + sym = self._partition_call_operator(inputs, attrs) else: raise NotImplementedError("Operator {} not implemented.".format(op_name)) return sym @@ -3321,6 +3346,19 @@ def _backtrack_construct(self, node_name): return self._nodes[node_name] + +class SubGraphProto(GraphProto): + """ A helper class for handling relay subgraph copying from Tensorflow GraphDef. + """ + def __init__(self, main): + super().__init__() + self.main = main # main graph proto + + def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): + func = self._get_func(graph, layout=layout, shape=shape, outputs=outputs) + return func, self._params + + def from_tensorflow(graph, layout="NHWC", shape=None, outputs=None): """Load tensorflow graph which is a python tensorflow graph object into relay. The companion parameters will be handled automatically. @@ -3347,6 +3385,7 @@ def from_tensorflow(graph, layout="NHWC", shape=None, outputs=None): params : dict of str to tvm.nd.NDArray Dict of converted parameters stored in tvm.nd.NDArray format """ + g = GraphProto() mod, params = g.from_tensorflow(graph, layout, shape, outputs) return mod, params From becbad4555ff6d311d1ebf050a825f473ace796b Mon Sep 17 00:00:00 2001 From: maheshambule Date: Mon, 18 May 2020 16:40:13 +0530 Subject: [PATCH 41/55] refactor --- python/tvm/relay/frontend/tensorflow.py | 31 ++++++++++++------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 1a48e5f24c19..41f0b64f1e09 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -3187,8 +3187,7 @@ def _partition_call_operator(self, inputs, attr): main_graph = self.main node_func_name = attr.get('f').name - outer_graph = ops.get_default_graph() - outer_graph_def = outer_graph.as_graph_def(add_shapes=True) + outer_graph_def = main_graph._graph func = next((f for f in outer_graph_def.library.function if f.signature.name == node_func_name), None) if func: @@ -3203,10 +3202,20 @@ def _partition_call_operator(self, inputs, attr): input_expr_dict[f_arg.name] = input subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph._mod) - # Construct relay nodes from the subgraph - g1 = SubGraphProto(self.main) - sub_func, sub_params = g1.from_tensorflow(subgraph, shape=subgraph_shape_dict) - self.main._params.update(sub_params) + func_name = 'func_{}'.format(func.signature.name) + try: + global_func = main_graph._mod[func_name] + sub_func = global_func + sub_params = self.main._params + except ValueError: + # Construct relay nodes from the subgraph + g1 = SubGraphProto(self.main) + sub_func, sub_params = g1.from_tensorflow(subgraph, shape=subgraph_shape_dict) + self.main._params.update(sub_params) + func_expr = _function.Function(sub_func.params, sub_func.body) + global_func = tvm.relay.GlobalVar(func_name) + main_graph._mod[global_func] = func_expr + param_exprs = [] for param_expr in sub_func.params: @@ -3220,16 +3229,6 @@ def _partition_call_operator(self, inputs, attr): raise Exception("Input parameter {} not found".format(param_name)) sb = tvm.relay.scope_builder.ScopeBuilder() - func_expr = _function.Function(sub_func.params, sub_func.body) - func_name = 'func_{}'.format(func.signature.name) - - try: - global_func = 
main_graph._mod[func_name]
-            except ValueError:
-                import traceback
-                global_func = tvm.relay.GlobalVar(func_name)
-                main_graph._mod[global_func] = func_expr
-
             loop_ret = global_func(*param_exprs)
             sb.ret(loop_ret)
             ret = sb.get()

From 9a45af9579f5fde093802f235ab457062f67efb2 Mon Sep 17 00:00:00 2001
From: Prashant Sail
Date: Mon, 18 May 2020 17:57:09 +0530
Subject: [PATCH 42/55] get op def compatible with TF 1.x & 2.x

---
 python/tvm/relay/frontend/tensorflow.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 41f0b64f1e09..0e8f183fc0cb 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -2911,7 +2911,9 @@ def _parse_import_prerequisites(self, graph):
             except ImportError as e:
                 raise ImportError(
                     "Unable to import tensorflow which is required {}".format(e))
-            op_def = op_def_registry._registered_ops.get(node.op)
+            getOpDef = op_def_registry._registered_ops.get if hasattr(op_def_registry,
+                "_registered_ops") else op_def_registry.get
+            op_def = getOpDef(node.op)
             if node.op == "Placeholder" or node.op == 'PlaceholderWithDefault':
                 pass
             elif node.op == "Const":
@@ -3180,14 +3182,14 @@ def _partition_call_operator(self, inputs, attr):

         try:
             from tensorflow.python.framework import function_def_to_graph
-            from tensorflow.python.framework import ops
         except ImportError as e:
             raise ImportError(
                 "Unable to import tensorflow which is required {}".format(e))

         main_graph = self.main
-        node_func_name = attr.get('f').name
         outer_graph_def = main_graph._graph
+
+        node_func_name = attr.get('f').name
         func = next((f for f in outer_graph_def.library.function
                      if f.signature.name == node_func_name), None)
         if func:

From fa122dfc304deb758871fc2b8122e1014d65c22b Mon Sep 17 00:00:00 2001
From: Prashant Sail
Date: Mon, 18 May 2020 18:30:47 +0530
Subject: [PATCH 43/55] Fixed linting issues

---
 python/tvm/relay/frontend/tensorflow.py | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 0e8f183fc0cb..14d5b6c75b33 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -2893,9 +2893,9 @@ def _get_func(self, graph, layout="NHWC", shape=None, outputs=None):

     def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
-      func = self._get_func(graph, layout=layout, shape=shape, outputs=outputs)
-      self._mod["main"] = func
-      return self._mod, self._params
+        func = self._get_func(graph, layout=layout, shape=shape, outputs=outputs)
+        self._mod["main"] = func
+        return self._mod, self._params

     def _parse_import_prerequisites(self, graph):
@@ -2911,8 +2911,8 @@ def _parse_import_prerequisites(self, graph):
             except ImportError as e:
                 raise ImportError(
                     "Unable to import tensorflow which is required {}".format(e))
-            getOpDef = op_def_registry._registered_ops.get if hasattr(op_def_registry,
-                "_registered_ops") else op_def_registry.get
+            getOpDef = op_def_registry._registered_ops.get if hasattr(op_def_registry,\
+                "_registered_ops") else op_def_registry.get
             op_def = getOpDef(node.op)
             if node.op == "Placeholder" or node.op == 'PlaceholderWithDefault':
                 pass
@@ -2926,8 +2926,9 @@ def _parse_import_prerequisites(self, graph):
                            _control_flow_nodes]]):
                 pass
             elif op_def is not None and op_def.is_stateful:
-                raise Exception("Found {} stateful operator in this graph. "
-                                "Rejecting the graph as TVM does not support stateful operations ".format(node.op))
+                raise Exception("Found {} stateful operator in this graph. "\
+                                "Rejecting the graph as TVM does not support stateful operations "\
+                                .format(node.op))
             else:
                 missing_operators.add(node.op)

@@ -3234,8 +3235,9 @@ def _partition_call_operator(self, inputs, attr):
             loop_ret = global_func(*param_exprs)
             sb.ret(loop_ret)
             ret = sb.get()
-
-        return ret
+        else:
+            raise Exception("Function not found - {}".format(node_func_name))
+        return ret

     def _convert_operator(self, op_name, inputs, attrs,
                           graph, identity_list=None, convert_map=None):

From 9b8f24d9ff5ad837af02f9a25fbfc9be5911cabd Mon Sep 17 00:00:00 2001
From: maheshambule
Date: Mon, 18 May 2020 19:41:48 +0530
Subject: [PATCH 44/55] added docstring and a few nits

---
 python/tvm/relay/frontend/tensorflow.py | 63 +++++++++++++++++--------
 1 file changed, 44 insertions(+), 19 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 41f0b64f1e09..b512b0daed3d 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -2720,9 +2720,9 @@ def __init__(self):
         self._loop_var_order = {}
         self._hash2tfnode = {}
         self._while_loop_name_set = set()
-        self.main = self
+        self._main_graph_proto = self

-    def _get_func(self, graph, layout="NHWC", shape=None, outputs=None):
+    def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None):
         """Construct relay nodes from tensorflow graph definition - GraphDef.

         Follow the tensorflow graph definition to parse and convert it to Relay.
@@ -2891,12 +2891,13 @@ def _get_func(self, graph, layout="NHWC", shape=None, outputs=None):
         func = _function.Function(analysis.free_vars(out), out)
         return func

     def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
-        func = self._get_func(graph, layout=layout, shape=shape, outputs=outputs)
-        self._mod["main"] = func
-        return self._mod, self._params
-
+        """ Wrapper to _get_relay_func which converts a Tensorflow graph to a Relay
+        function. The Relay module is created using this function.
+        """
+        func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)
+        self._mod["main"] = func
+        return self._mod, self._params

     def _parse_import_prerequisites(self, graph):
         """ Calculate the named preconditions from TensorFlow `graph`.
@@ -3177,6 +3178,27 @@ def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_
         return op

     def _partition_call_operator(self, inputs, attr):
+        """
+        Convert a PartitionedCall/StatefulPartitionedCall op to a Relay function
+        call, lifting the function definition referenced by the node's `f`
+        attribute from the TensorFlow graph library into a Relay global function.
+
+        Parameters
+        ----------
+        inputs : List[tvm.relay.Expr]
+            List of input symbols.
+
+        attr : Dict[tvm.Attrs]
+            Dict of operator attributes.
+
+        Returns
+        -------
+        op : tvm.relay.Expr
+            Converted relay expression.
+        """
         try:
             from tensorflow.python.framework import function_def_to_graph
@@ -3185,9 +3207,9 @@ def _partition_call_operator(self, inputs, attr):
             raise ImportError(
                 "Unable to import tensorflow which is required {}".format(e))

-        main_graph = self.main
+        main_graph_proto = self._main_graph_proto
         node_func_name = attr.get('f').name
-        outer_graph_def = main_graph._graph
+        outer_graph_def = main_graph_proto._graph
         func = next((f for f in outer_graph_def.library.function
                      if f.signature.name == node_func_name), None)
         if func:
@@ -3200,22 +3222,21 @@ def _partition_call_operator(self, inputs, attr):
             subgraph_shape_dict, input_expr_dict = {}, {}
             for f_arg, input in zip(func.signature.input_arg, inputs):
                 input_expr_dict[f_arg.name] = input
-                subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph._mod)
+                subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph_proto._mod)

             func_name = 'func_{}'.format(func.signature.name)
             try:
-                global_func = main_graph._mod[func_name]
+                global_func = main_graph_proto._mod[func_name]
                 sub_func = global_func
-                sub_params = self.main._params
+                sub_params = main_graph_proto._params
             except ValueError:
                 # Construct relay nodes from the subgraph
-                g1 = SubGraphProto(self.main)
+                g1 = SubGraphProto(main_graph_proto)
                 sub_func, sub_params = g1.from_tensorflow(subgraph, shape=subgraph_shape_dict)
-                self.main._params.update(sub_params)
+                main_graph_proto._params.update(sub_params)
                 func_expr = _function.Function(sub_func.params, sub_func.body)
                 global_func = tvm.relay.GlobalVar(func_name)
-                main_graph._mod[global_func] = func_expr
-
+                main_graph_proto._mod[global_func] = func_expr

             param_exprs = []
             for param_expr in sub_func.params:
@@ -3349,12 +3370,16 @@ def _backtrack_construct(self, node_name):

 class SubGraphProto(GraphProto):
     """ A helper class for handling relay subgraph copying from Tensorflow GraphDef.
     """
-    def __init__(self, main):
+    def __init__(self, main_graph_proto):
         super().__init__()
-        self.main = main  # main graph proto
+        self._main_graph_proto = main_graph_proto  # holds main graph proto object

     def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
-        func = self._get_func(graph, layout=layout, shape=shape, outputs=outputs)
+        """ Wrapper to _get_relay_func which converts a Tensorflow graph to a Relay
+        function. The Relay function and its parameters are returned.
+        """
+
+        func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)
         return func, self._params

From 58883173d578cb1c5a73a5c8dcee29efd4e4c0c5 Mon Sep 17 00:00:00 2001
From: deepak
Date: Mon, 18 May 2020 19:57:16 +0530
Subject: [PATCH 45/55] Merged changes for positive test cases and negative
 test cases

---
 python/tvm/relay/frontend/tensorflow.py       |   7 +-
 .../frontend/tensorflow/test_forward.py       | 151 +++++++-----------
 2 files changed, 67 insertions(+), 91 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 14d5b6c75b33..074e693fa631 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -2926,7 +2926,7 @@ def _parse_import_prerequisites(self, graph):
                            _control_flow_nodes]]):
                 pass
             elif op_def is not None and op_def.is_stateful:
-                raise Exception("Found {} stateful operator in this graph. "\
-                                "Rejecting the graph as TVM does not support stateful operations "\
-                                .format(node.op))
+                raise Exception("Found a stateful operator in this graph {}. 
"\ "Rejecting the graph as TVM does not support stateful operations "\ .format(node.op)) else: @@ -3194,6 +3194,11 @@ def _partition_call_operator(self, inputs, attr): func = next((f for f in outer_graph_def.library.function if f.signature.name == node_func_name), None) if func: + devices = set(node.device for node in func.node_def) + if len(devices) > 1: + raise Exception("Found inconsistent Device assignment in the "\ + "Stateful Partitioned SubGraph. Rejecting "\ + "the subgraph ") # Convert function definition to graph func_input_shapes = func.attr["_input_shapes"].list.shape subgraph, _ = function_def_to_graph.\ diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 842b3b4b8fad..e5d51fa1c6ba 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -1128,13 +1128,13 @@ def test_read_variable_op(): tf_output = run_tf_graph(sess, in_data, in_name, out_name) shape_dict = {e: i.shape for e, i in zip(in_name, in_data)} - with pytest.raises(Exception) as exexcinfo: + with pytest.raises(Exception) as execinfo: mod, params = relay.frontend.from_tensorflow(final_graph_def, layout=None, shape=shape_dict, outputs=None) - assert exexcinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph.") + assert execinfo.value.args[0].startswith("Found a stateful operator in this graph") # Now convert the variables to constant and run inference on the converted graph final_graph_def = tf.graph_util.convert_variables_to_constants( @@ -3168,6 +3168,7 @@ def test_forward_isinf(): def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") + def _test_spop_placeholder_one(): with tf.Graph().as_default(): @@ -3188,6 +3189,7 @@ def Forward(x,y): compare_tf_with_tvm([data, data2, data3], ['pl1:0', 'pl2:0', 'pl3:0'], ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) + def _test_spop_placeholder_two(): with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) @@ -3202,6 +3204,7 @@ def pl_with_default(pl): z = gen_functional_ops.StatefulPartitionedCall(args=[tpl], Tout=[tf.int32], f=pl_with_default) compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + def _test_spop_placeholder_three(): with tf.Graph().as_default(): t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") @@ -3216,6 +3219,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) + def _test_spop_placeholder_four(): with tf.Graph().as_default(): t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) @@ -3230,6 +3234,7 @@ def add(x, y): t3 = add(t1, t2) compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) + def _test_spop_function_invocation_basic(): with tf.Graph().as_default(): @@ -3250,6 +3255,7 @@ def fun3(x,y): compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) + def _test_spop_function_invocation_nested(): with tf.Graph().as_default(): t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1") @@ -3273,14 +3279,15 @@ def myfunc2(x, y): compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [res2.name], mode='vm', init_global_variables=True) -def _test_spop_function_invocation_autograph(): + +def _test_spop_function_invocation_no_autograph(): with tf.Graph().as_default(): - @tf.function + 
@tf.function(autograph=False) def fun1(a): return tf.multiply(a,a) - @tf.function + @tf.function(autograph=False) def fun2(b): return tf.multiply(b,10) @@ -3295,6 +3302,7 @@ def fun3(x,y): compare_tf_with_tvm([], [], [t3.name], mode='vm', init_global_variables=True) + def _test_spop_function_invocation_defun(): with tf.Graph().as_default(): @@ -3315,6 +3323,7 @@ def fun3(x,y): Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation") compare_tf_with_tvm([],[], 'SpopFnInvocation:0', mode='vm', init_global_variables=True) + def _test_spop_arithmetic(): with tf.Graph().as_default(): @function.Defun(*[dtypes.int32]*3) @@ -3329,6 +3338,7 @@ def arithmetic(m,x,c): compare_tf_with_tvm([],[],'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + def _test_spop_control_flow(): with tf.Graph().as_default(): @@ -3344,9 +3354,12 @@ def Body1(x, y): z = math_ops.multiply(x, y*i) return z - op = gen_functional_ops.StatefulPartitionedCall(args=[constant_op.constant(32.), constant_op.constant(100.)], Tout=[dtypes.float32], f=Body1) + op = gen_functional_ops.StatefulPartitionedCall( + args=[constant_op.constant(32.), constant_op.constant(100.)], + Tout=[dtypes.float32], f=Body1) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + def _test_spop_variables(): with tf.Graph().as_default(): const1 = tf.constant(10) @@ -3361,6 +3374,7 @@ def Forward(x,y): z = gen_functional_ops.StatefulPartitionedCall(args=[var1,var2],Tout=[tf.int32], f=Forward) compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', init_global_variables=True, mode="vm") + def _test_spop_constants(): with tf.Graph().as_default(): @function.Defun(*[dtypes.int32] * 2) @@ -3376,18 +3390,6 @@ def constantsFn(x, y): compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def _test_spop_placeholder(): - _test_spop_placeholder_one() - _test_spop_placeholder_two() - _test_spop_placeholder_three() - _test_spop_placeholder_four() - -def _test_spop_function_invocation(): - _test_spop_function_invocation_basic() - _test_spop_function_invocation_nested() - _test_spop_function_invocation_autograph() - _test_spop_function_invocation_defun() - def _test_spop_stateful(): tf.reset_default_graph() @@ -3408,7 +3410,10 @@ def FunctionWithStatefulOp(m, n): return z op = FunctionWithStatefulOp(constant_op.constant(1.), constant_op.constant(2.)) - compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm") + with pytest.raises(Exception) as execinfo: + compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm") + assert execinfo.value.args[0].startswith("Found a stateful operator in this graph") + def _test_spop_device_assignment(): @@ -3427,34 +3432,19 @@ def fun2(b): def fun3(x,y): with ops.device("/CPU:0"): x = fun2(x) - with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"): - y = fun1(y) with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"): + y = fun1(y) + with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"): z = tf.add(x,y) return z op = gen_functional_ops.StatefulPartitionedCall(args=[tf.constant(10.5),tf.constant(20.4)], Tout=[dtypes.float32], f=fun3) - try: - from tensorflow.core.protobuf import config_pb2 - except ImportError as e: - raise ImportError( - "Unable to import tensorflow which is required {}".format(e)) - - run_options = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) - run_metadata = config_pb2.RunMetadata() - with tf.Session() as sess: - 
sess.run(tf.global_variables_initializer()) - print("The output of device assignment run is = ", - sess.run(op, options=run_options, run_metadata=run_metadata)) - assignedDevicesSet = set() - for func in run_metadata.step_stats.dev_stats: - print("device used: ", repr(func.device)) - assignedDevicesSet.add(func.device) - if (len(assignedDevicesSet) > 1): - print("no of devices used are: ",len(assignedDevicesSet)) - raise Exception("Device assignment is not consistent. Rejecting the graph") - compare_tf_with_tvm([],[], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) + with pytest.raises(Exception) as execinfo: + compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', + mode='vm', init_global_variables=True) + assert execinfo.value.args[0].startswith("Found inconsistent Device assignment") + def _test_spop_resource_variables(): tf.reset_default_graph() @@ -3470,71 +3460,52 @@ def resourceVariablesTest(x, y): return tf.multiply(x, y) op = resourceVariablesTest(var1,var2) + with pytest.raises(Exception) as execinfo: + compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', + mode='vm', init_global_variables=True) + assert execinfo.value.args[0].startswith("Found a stateful operator in this graph") - def isResourceVariable(var): - try: - from tensorflow.python.ops.variables import RefVariable - from tensorflow.python.ops.resource_variable_ops import ResourceVariable - except ImportError as e: - raise ImportError( - "Unable to import tensorflow which is required {}".format(e)) - return bool(not issubclass(var.__class__,RefVariable) - and issubclass(var.__class__,ResourceVariable)) - - with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - sess.run(op) - mylist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) - resVarFound, resVarCnt = False, 0 - for var in mylist: - if isResourceVariable(var): - resVarFound = True - resVarCnt += 1 - print(resVarFound) - for var in mylist: - print(var, var.__class__) - if(resVarCnt > 0): - print("Graph contains {} many resource variables ".format(resVarCnt)) - raise Exception("Graph contains Resource variables, so rejecting the graph") - - compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) - -def _test_forward_spop_positive(): - _test_spop_placeholder() - _test_spop_function_invocation() - _test_spop_arithmetic() - _test_spop_control_flow() - _test_spop_variables() - _test_spop_constants() - -def _test_forward_spop_negative(): +def test_forward_spop(): #Uncomment the following test case to test that TVM rejects any TF stateful operations - # except StatefulPartitionedCall/PartitionedCall(as these two operators can still be used - # as container graphs to execute "stateless" operations internally. - # _test_spop_stateful() + # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall + # (as these two operators can still be used as container graphs to execute + # "stateless" operations internally. + _test_spop_stateful() # Uncomment the following test case to test that TVM rejects inconsistent device assignment # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will # be used as container graphs to internally execute "stateless" operations. _test_spop_device_assignment() - #Uncomment the following test case to test that TVM rejects any graph containing resource variables with - #StatefulPartitionedOp. 
- # _test_spop_resource_variables() + #Uncomment the following test case to test that TVM rejects any graph containing + # resource variables with StatefulPartitionedOp. + _test_spop_resource_variables() -def test_forward_spop(): - _test_forward_spop_positive() - _test_forward_spop_negative() + #Placeholder test cases + _test_spop_placeholder_one() + _test_spop_placeholder_two() + _test_spop_placeholder_three() + _test_spop_placeholder_four() + + #Function Invocation test cases + _test_spop_function_invocation_basic() + _test_spop_function_invocation_nested() + _test_spop_function_invocation_no_autograph() + _test_spop_function_invocation_defun() + + #Test cases for various other TF constructs + _test_spop_arithmetic() + _test_spop_control_flow() + _test_spop_variables() + _test_spop_constants() ####################################################################### # Main # ---- if __name__ == '__main__': - # StatefulPartitionedOp + # StatefulPartitionedCall test_forward_spop() - test_forward_spop_positive() - test_forward_spop_negative() # Transforms test_forward_slice() test_forward_transpose() From 7f3b52bdb5323597b6c8e78037de04ad5c1a13a3 Mon Sep 17 00:00:00 2001 From: deepak Date: Mon, 18 May 2020 20:04:56 +0530 Subject: [PATCH 46/55] Moved StatefulPartitionedCall test case to the end of the TC list --- tests/python/frontend/tensorflow/test_forward.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index e5d51fa1c6ba..be8ef835da36 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -3504,8 +3504,7 @@ def test_forward_spop(): # Main # ---- if __name__ == '__main__': - # StatefulPartitionedCall - test_forward_spop() + # Transforms test_forward_slice() test_forward_transpose() @@ -3630,3 +3629,6 @@ def test_forward_spop(): # Sharing params case using Mean ops test_sharing_node() + + # StatefulPartitionedCall + test_forward_spop() From 92e1853700d0bdbf1f871cc756368eb083846aa1 Mon Sep 17 00:00:00 2001 From: deepak Date: Mon, 18 May 2020 20:33:18 +0530 Subject: [PATCH 47/55] Fixed some typos and semantics --- python/tvm/relay/frontend/tensorflow.py | 1 - tests/python/frontend/tensorflow/test_forward.py | 7 +++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 2228fe01147e..441812f0644b 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -45,7 +45,6 @@ __all__ = ['from_tensorflow'] - def _get_pad_pair(input1d, kernel1d, stride1d): if input1d % stride1d == 0: pad = max(kernel1d - stride1d, 0) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index be8ef835da36..80266471ccc2 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -40,7 +40,6 @@ from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.ops import gen_functional_ops -from tensorflow.python.framework import op_def_registry from distutils.version import LooseVersion import tvm from tvm import te @@ -3466,18 +3465,18 @@ def resourceVariablesTest(x, y): assert execinfo.value.args[0].startswith("Found a stateful operator in this graph") def test_forward_spop(): - #Uncomment the following test case to test that TVM 
rejects any TF stateful operations + # This test case is to test that TVM rejects any TF stateful operations # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall # (as these two operators can still be used as container graphs to execute # "stateless" operations internally. _test_spop_stateful() - # Uncomment the following test case to test that TVM rejects inconsistent device assignment + # This test case is to test that TVM rejects inconsistent device assignment # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will # be used as container graphs to internally execute "stateless" operations. _test_spop_device_assignment() - #Uncomment the following test case to test that TVM rejects any graph containing + # This test case is to test that TVM rejects any graph containing # resource variables with StatefulPartitionedOp. _test_spop_resource_variables() From 7968859095cb3c9ca991dc8ec46bffaabfdfbb7b Mon Sep 17 00:00:00 2001 From: maheshambule Date: Mon, 18 May 2020 21:38:59 +0530 Subject: [PATCH 48/55] dmlc-core --- 3rdparty/dmlc-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3rdparty/dmlc-core b/3rdparty/dmlc-core index 808f485387f9..981b1c32f916 160000 --- a/3rdparty/dmlc-core +++ b/3rdparty/dmlc-core @@ -1 +1 @@ -Subproject commit 808f485387f9a03f78fa9f1159f387d0d91b7a28 +Subproject commit 981b1c32f91668e669ee376856f92f36cfd2a351 From cdc4af9c1cacf01326862fe16a966d39369011da Mon Sep 17 00:00:00 2001 From: maheshambule Date: Mon, 18 May 2020 21:43:38 +0530 Subject: [PATCH 49/55] dmlc-core --- 3rdparty/dmlc-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3rdparty/dmlc-core b/3rdparty/dmlc-core index 981b1c32f916..ff3db4367a30 160000 --- a/3rdparty/dmlc-core +++ b/3rdparty/dmlc-core @@ -1 +1 @@ -Subproject commit 981b1c32f91668e669ee376856f92f36cfd2a351 +Subproject commit ff3db4367a30f542aafb83b4af45e685b80102d0 From fe58431b8bc26b8fa5ad00511e27c990039204a7 Mon Sep 17 00:00:00 2001 From: maheshambule Date: Mon, 18 May 2020 22:23:22 +0530 Subject: [PATCH 50/55] fixes --- python/tvm/relay/frontend/tensorflow.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 32dda7c09c4a..168fb1dd9cdc 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2888,8 +2888,8 @@ def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None): return func def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): - """ Wrapper to _get_func which converts Tensorflow graph to Relay function. 
- Relay module is created using the function + """ Wrapper to _get_relay_func which converts Tensorflow graph to Relay function + which is used as main function for the Relay module """ func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs) self._mod["main"] = func @@ -3329,7 +3329,6 @@ def _backtrack_construct(self, node_name): Converted relay expression """ node_name = node_name.split(':')[0].split("^")[-1] - inputs = [] if node_name not in self._nodes: node = self._tf_node_map[node_name] @@ -3344,6 +3343,7 @@ def _backtrack_construct(self, node_name): attr["_output_shapes"] = self._output_shapes[node_name] attr["_node_name"] = node.name attr["_target_layout"] = self._layout + inputs = [] for iname in node.input: in_op = self._backtrack_construct(iname) if isinstance(in_op, _expr.TupleWrapper): @@ -3380,10 +3380,9 @@ def __init__(self, main_graph_proto): self._main_graph_proto = main_graph_proto # holds main graph proto object def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): - """ Wrapper to _get_func which converts Tensorflow graph to Relay function. - Relay module is created using the function + """ Wrapper to _get_relay_func which converts Tensorflow graph to Relay function. + Return Relay function and params """ - func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs) return func, self._params From 06a80b3f15a785e8a4118f4c8e58823bfae9b01c Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 29 May 2020 03:42:09 +0530 Subject: [PATCH 51/55] Addressing Review comments in the PR for SPOP support --- python/tvm/relay/frontend/tensorflow.py | 17 +++---- .../frontend/tensorflow/test_forward.py | 46 +++++++++---------- 2 files changed, 32 insertions(+), 31 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 168fb1dd9cdc..70fd69e3b0aa 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -20,6 +20,7 @@ """TF: Tensorflow frontend.""" import warnings from collections import defaultdict +from tensorflow.python.framework import op_def_registry # Numpy support import numpy as np @@ -2717,6 +2718,7 @@ def __init__(self): self._hash2tfnode = {} self._while_loop_name_set = set() self._main_graph_proto = self + self._stateful_ops_list = [] def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None): """Construct relay nodes from tensorflow graph definition - GraphDef. @@ -2773,6 +2775,11 @@ def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None): if freezed_ops: raise Exception("Graph is not frozen. Provide a frozen graph. " "Found operators {}".format(freezed_ops)) + stateful_ops = [op for op in missing_operators if op in self._main_graph_proto._stateful_ops_list] + if stateful_ops: + raise Exception("Found stateful operators in this graph {}. 
" \ + "Rejecting the graph as TVM does not support stateful operations " \ + .format(stateful_ops)) raise NotImplementedError( "The following operators are not implemented: {}".format(missing_operators)) @@ -2903,11 +2910,6 @@ def _parse_import_prerequisites(self, graph): """ missing_operators = set() for node in graph.node: - try: - from tensorflow.python.framework import op_def_registry - except ImportError as e: - raise ImportError( - "Unable to import tensorflow which is required {}".format(e)) getOpDef = op_def_registry._registered_ops.get if hasattr(op_def_registry,\ "_registered_ops") else op_def_registry.get op_def = getOpDef(node.op) @@ -2923,9 +2925,8 @@ def _parse_import_prerequisites(self, graph): _control_flow_nodes]]): pass elif op_def is not None and op_def.is_stateful: - raise Exception("Found a stateful operator in this graph {}. "\ - "Rejecting the graph as TVM does not support stateful operations "\ - .format(node.op)) + self._main_graph_proto._stateful_ops_list.append(node.op) + missing_operators.add(node.op) else: missing_operators.add(node.op) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7c6140df16c2..7f547ac44375 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -40,6 +40,7 @@ from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.ops import gen_functional_ops +from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from distutils.version import LooseVersion import tvm from tvm import te @@ -180,6 +181,7 @@ def name_without_num(name): if init_global_variables: sess.run(variables.global_variables_initializer()) final_graph_def = tf_testing.AddShapesToGraphDef(sess, out_node) + tf_output = run_tf_graph(sess, in_data, in_name, out_name) for device in ["llvm", "cuda"]: @@ -1148,7 +1150,7 @@ def test_read_variable_op(): shape=shape_dict, outputs=None) - assert execinfo.value.args[0].startswith("Found a stateful operator in this graph") + assert execinfo.value.args[0].startswith("Graph is not frozen. 
Provide a frozen graph") # Now convert the variables to constant and run inference on the converted graph final_graph_def = tf.graph_util.convert_variables_to_constants( @@ -3183,7 +3185,7 @@ def test_forward_isfinite(): _verify_infiniteness_ops(tf.is_finite, "isfinite") -def _test_spop_placeholder_one(): +def _test_spop_placeholder_without_shape_info(): with tf.Graph().as_default(): @function.Defun(*[tf.int32]*2) @@ -3204,7 +3206,7 @@ def Forward(x,y): ['StatefulPartitionedCall:0',z2.name], mode='vm', init_global_variables=True) -def _test_spop_placeholder_two(): +def _test_spop_placeholder_with_shape_and_default_value(): with tf.Graph().as_default(): data = np.ones([1], dtype=int).astype(np.int32) dataVar = tf.Variable(data, shape=data.shape) @@ -3219,7 +3221,7 @@ def pl_with_default(pl): compare_tf_with_tvm(data, ['pl1:0'], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) -def _test_spop_placeholder_three(): +def _test_spop_placeholder_numpy_arange_feed(): with tf.Graph().as_default(): t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1") t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3)) @@ -3234,7 +3236,7 @@ def add(x, y): compare_tf_with_tvm([t1_data, t2_data], ['t1:0', 't2:0'], [t3.name], mode='vm', init_global_variables=True) -def _test_spop_placeholder_four(): +def _test_spop_placeholder_numpy_array_feed(): with tf.Graph().as_default(): t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32) t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32) @@ -3405,7 +3407,10 @@ def constantsFn(x, y): def _test_spop_stateful(): - + # This test case is to test that TVM rejects any TF stateful operations + # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall + # (as these two operators can still be used as container graphs to execute + # "stateless" operations internally. tf.reset_default_graph() with tf.Graph().as_default(): @@ -3426,10 +3431,13 @@ def FunctionWithStatefulOp(m, n): op = FunctionWithStatefulOp(constant_op.constant(1.), constant_op.constant(2.)) with pytest.raises(Exception) as execinfo: compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm") - assert execinfo.value.args[0].startswith("Found a stateful operator in this graph") + assert execinfo.value.args[0].startswith("Found stateful operators in this graph") def _test_spop_device_assignment(): + # This test case is to test that TVM rejects inconsistent device assignment + # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will + # be used as container graphs to internally execute "stateless" operations. tf.reset_default_graph() with tf.Graph().as_default(): @@ -3461,6 +3469,9 @@ def fun3(x,y): def _test_spop_resource_variables(): + # This test case is to test that TVM rejects any graph containing + # resource variables with StatefulPartitionedOp. + tf.reset_default_graph() with tf.Graph().as_default(): @@ -3477,29 +3488,18 @@ def resourceVariablesTest(x, y): with pytest.raises(Exception) as execinfo: compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0', mode='vm', init_global_variables=True) - assert execinfo.value.args[0].startswith("Found a stateful operator in this graph") + assert execinfo.value.args[0].startswith("Graph is not frozen. 
Provide a frozen graph.") def test_forward_spop(): - # This test case is to test that TVM rejects any TF stateful operations - # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall - # (as these two operators can still be used as container graphs to execute - # "stateless" operations internally. _test_spop_stateful() - - # This test case is to test that TVM rejects inconsistent device assignment - # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will - # be used as container graphs to internally execute "stateless" operations. _test_spop_device_assignment() - - # This test case is to test that TVM rejects any graph containing - # resource variables with StatefulPartitionedOp. _test_spop_resource_variables() #Placeholder test cases - _test_spop_placeholder_one() - _test_spop_placeholder_two() - _test_spop_placeholder_three() - _test_spop_placeholder_four() + _test_spop_placeholder_without_shape_info() + _test_spop_placeholder_with_shape_and_default_value() + _test_spop_placeholder_numpy_arange_feed() + _test_spop_placeholder_numpy_array_feed() #Function Invocation test cases _test_spop_function_invocation_basic() From cf8f3740e7949b72407b3de37c8c463918efbd41 Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 29 May 2020 03:50:32 +0530 Subject: [PATCH 52/55] Fixed pylint errors --- python/tvm/relay/frontend/tensorflow.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 70fd69e3b0aa..8bfbaa6d8f3e 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2775,7 +2775,8 @@ def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None): if freezed_ops: raise Exception("Graph is not frozen. Provide a frozen graph. " "Found operators {}".format(freezed_ops)) - stateful_ops = [op for op in missing_operators if op in self._main_graph_proto._stateful_ops_list] + stateful_ops = [op for op in missing_operators + if op in self._main_graph_proto._stateful_ops_list] if stateful_ops: raise Exception("Found stateful operators in this graph {}. 
" \ "Rejecting the graph as TVM does not support stateful operations " \ From bb09d3dbfe51a7812f9fbf3c46dec79eecc2060d Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 29 May 2020 10:11:29 +0530 Subject: [PATCH 53/55] Corrected tensorflow import syntax --- python/tvm/relay/frontend/tensorflow.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 8bfbaa6d8f3e..91f341cf0b92 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -20,6 +20,10 @@ """TF: Tensorflow frontend.""" import warnings from collections import defaultdict +try: + import tensorflow.compat.v1 as tf +except ImportError: + import tensorflow as tf from tensorflow.python.framework import op_def_registry # Numpy support From 01244c95fcc2bcf1bb20f5e7f41dfb4c85a74aad Mon Sep 17 00:00:00 2001 From: deepak Date: Fri, 29 May 2020 10:33:01 +0530 Subject: [PATCH 54/55] Placed the op_def_registry module import outside of for loop --- python/tvm/relay/frontend/tensorflow.py | 6 +----- tests/python/frontend/tensorflow/test_forward.py | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 91f341cf0b92..407f9997134a 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -20,11 +20,6 @@ """TF: Tensorflow frontend.""" import warnings from collections import defaultdict -try: - import tensorflow.compat.v1 as tf -except ImportError: - import tensorflow as tf -from tensorflow.python.framework import op_def_registry # Numpy support import numpy as np @@ -2914,6 +2909,7 @@ def _parse_import_prerequisites(self, graph): which are not supported """ missing_operators = set() + from tensorflow.python.framework import op_def_registry for node in graph.node: getOpDef = op_def_registry._registered_ops.get if hasattr(op_def_registry,\ "_registered_ops") else op_def_registry.get diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index 7f547ac44375..f874d7452502 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -40,7 +40,6 @@ from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.ops import gen_functional_ops -from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from distutils.version import LooseVersion import tvm from tvm import te From c0308ca9024912b58fb2c40137750b0e6bc3cb9c Mon Sep 17 00:00:00 2001 From: deepak Date: Mon, 1 Jun 2020 02:53:42 +0530 Subject: [PATCH 55/55] Removed new stateful operators list and combined these operators with missing operators to display as single list. 
Also removed throwing separate exception for stateful ops
---
 python/tvm/relay/frontend/tensorflow.py          | 8 --------
 tests/python/frontend/tensorflow/test_forward.py | 6 ++++--
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 407f9997134a..6b4a534e51bd 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -2717,7 +2717,6 @@ def __init__(self):
         self._hash2tfnode = {}
         self._while_loop_name_set = set()
         self._main_graph_proto = self
-        self._stateful_ops_list = []

     def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None):
         """Construct relay nodes from tensorflow graph definition - GraphDef.
@@ -2774,12 +2773,6 @@ def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None):
             if freezed_ops:
                 raise Exception("Graph is not frozen. Provide a frozen graph. "
                                 "Found operators {}".format(freezed_ops))
-            stateful_ops = [op for op in missing_operators
-                            if op in self._main_graph_proto._stateful_ops_list]
-            if stateful_ops:
-                raise Exception("Found stateful operators in this graph {}. " \
-                    "Rejecting the graph as TVM does not support stateful operations " \
-                    .format(stateful_ops))
             raise NotImplementedError(
                 "The following operators are not implemented: {}".format(missing_operators))
@@ -2926,7 +2919,6 @@ def _parse_import_prerequisites(self, graph):
                                             _control_flow_nodes]]):
                 pass
             elif op_def is not None and op_def.is_stateful:
-                self._main_graph_proto._stateful_ops_list.append(node.op)
                 missing_operators.add(node.op)
             else:
                 missing_operators.add(node.op)
diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index f874d7452502..93bf7394c80c 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -3430,7 +3430,8 @@ def FunctionWithStatefulOp(m, n):
         op = FunctionWithStatefulOp(constant_op.constant(1.), constant_op.constant(2.))
         with pytest.raises(Exception) as execinfo:
             compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
-        assert execinfo.value.args[0].startswith("Found stateful operators in this graph")
+        assert execinfo.value.args[0].startswith(
+            "The following operators are not implemented")

 def _test_spop_device_assignment():
@@ -3487,7 +3488,8 @@ def resourceVariablesTest(x, y):
         with pytest.raises(Exception) as execinfo:
             compare_tf_with_tvm([], [], 'StatefulPartitionedCall:0',
                                 mode='vm', init_global_variables=True)
-        assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph.")
+        assert execinfo.value.args[0].startswith("Graph is not frozen."
+                                                 " Provide a frozen graph")

 def test_forward_spop():
     _test_spop_stateful()
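
For reference, below is a minimal end-to-end sketch of how the StatefulPartitionedCall
support added by this series might be exercised outside the test harness. It is an
illustration, not part of the patches: it assumes a TF 1.x-compatible environment and a
TVM build with this series applied; the Fun3 / SpopFnInvocation names simply mirror
_test_spop_function_invocation_defun above, and tvm.relay.testing.tf is assumed to
provide the same AddShapesToGraphDef helper that compare_tf_with_tvm uses.

    import tensorflow as tf
    from tensorflow.python.framework import dtypes, function
    from tensorflow.python.ops import gen_functional_ops

    import tvm
    from tvm import relay
    import tvm.relay.testing.tf as tf_testing

    with tf.Graph().as_default():
        # Library function: it lands in graph_def.library and is converted
        # into a Relay sub-function by the patched frontend.
        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            return tf.add(tf.multiply(x, 10.0), tf.multiply(y, y))

        # A stateless computation wrapped in a StatefulPartitionedCall node,
        # exactly as in the tests above.
        op = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)],
            Tout=[dtypes.float32], f=fun3, name="SpopFnInvocation")

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # Attach output shapes before conversion, as the test helper does.
            graph_def = tf_testing.AddShapesToGraphDef(sess, "SpopFnInvocation")

    # Convert and run through the Relay VM, matching the tests' mode='vm'.
    mod, params = relay.frontend.from_tensorflow(graph_def,
                                                 outputs=["SpopFnInvocation"])
    ex = relay.create_executor("vm", mod=mod, ctx=tvm.cpu(), target="llvm")
    print(ex.evaluate()(**params))  # expected: 10.5 * 10 + 20.4 * 20.4

Since PartitionedCall is converted through the same subgraph path as
StatefulPartitionedCall, the same sketch applies to the stateless variant.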