From 1d3a9044b90e127a4340ecbc7cb073450510652f Mon Sep 17 00:00:00 2001
From: yulangz <1301481108@qq.com>
Date: Tue, 5 Mar 2024 04:46:13 +0000
Subject: [PATCH] add stable diffusion subgraphs

---
 .../test_sub_graph_stable_diffusion_0_st.py  | 118 +++++++
 .../test_sub_graph_stable_diffusion_10_st.py | 313 ++++++++++++++++++
 .../test_sub_graph_stable_diffusion_11_st.py | 118 +++++++
 .../test_sub_graph_stable_diffusion_12_st.py |  84 +++++
 .../test_sub_graph_stable_diffusion_13_st.py | 310 +++++++++++++++++
 .../test_sub_graph_stable_diffusion_14_st.py | 118 +++++++
 .../test_sub_graph_stable_diffusion_15_st.py | 107 ++++++
 .../test_sub_graph_stable_diffusion_16_st.py | 118 +++++++
 .../test_sub_graph_stable_diffusion_17_st.py |  84 +++++
 .../test_sub_graph_stable_diffusion_18_st.py | 310 +++++++++++++++++
 .../test_sub_graph_stable_diffusion_19_st.py | 118 +++++++
 .../test_sub_graph_stable_diffusion_1_st.py  | 118 +++++++
 .../test_sub_graph_stable_diffusion_20_st.py | 107 ++++++
 .../test_sub_graph_stable_diffusion_21_st.py | 118 +++++++
 .../test_sub_graph_stable_diffusion_22_st.py | 118 +++++++
 .../test_sub_graph_stable_diffusion_23_st.py |  99 ++++++
 .../test_sub_graph_stable_diffusion_24_st.py |  99 ++++++
 .../test_sub_graph_stable_diffusion_25_st.py |  92 +++++
 .../test_sub_graph_stable_diffusion_2_st.py  | 156 +++++++++
 .../test_sub_graph_stable_diffusion_3_st.py  |  85 +++++
 .../test_sub_graph_stable_diffusion_4_st.py  | 114 +++++++
 .../test_sub_graph_stable_diffusion_5_st.py  | 113 +++++++
 .../test_sub_graph_stable_diffusion_6_st.py  | 101 ++++++
 .../test_sub_graph_stable_diffusion_7_st.py  | 118 +++++++
 .../test_sub_graph_stable_diffusion_8_st.py  | 107 ++++++
 .../test_sub_graph_stable_diffusion_9_st.py  |  84 +++++
 26 files changed, 3427 insertions(+)
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_0_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_10_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_11_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_12_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_13_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_14_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_15_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_16_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_17_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_18_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_19_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_1_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_20_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_21_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_22_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_23_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_24_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_25_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_2_st.py
 create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_3_st.py
 create mode 100644
test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_4_st.py create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_5_st.py create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_6_st.py create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_7_st.py create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_8_st.py create mode 100644 test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_9_st.py diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_0_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_0_st.py new file mode 100644 index 0000000000000..47b4cb70db316 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_0_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[256, 128, 1, 1], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[256], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[256], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[256, 256, 3, 3], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 256, 4, 4], dtype: paddle.float32, stop_gradient: True) + var_1, # (shape: [1, 128, 4, 4], dtype: paddle.float32, stop_gradient: True) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=False, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_3, self.parameter_2, [1, 1], 1, [1, 1], 1 + ) + var_5 = paddle.nn.functional.conv.conv2d( + var_1, self.parameter_0, self.parameter_1, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5 + var_4 + var_7 = var_6 / 1.0 + return var_7 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 256, 4, 4], dtype=paddle.float32), + paddle.rand(shape=[1, 128, 4, 4], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 256, 4, 4]).astype('float32'), + np.random.random(size=[1, 128, 4, 4]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + 
build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_10_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_10_st.py new file mode 100644 index 0000000000000..61ee042706c15 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_10_st.py @@ -0,0 +1,313 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.conv.conv2d||method:transpose||method:flatten||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||method:reshape||method:transpose||method:reshape||method:transpose||method:reshape||method:transpose||api:paddle.tensor.linalg.matmul||method:__mul__||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.linalg.matmul||method:transpose||method:reshape||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.dropout||method:__truediv__||method:__add__||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||method:reshape||method:transpose||method:reshape||method:transpose||method:reshape||method:transpose||api:paddle.tensor.linalg.matmul||method:__mul__||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.linalg.matmul||method:transpose||method:reshape||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.dropout||method:__truediv__||method:__add__||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||method:chunk||api:paddle.nn.functional.activation.gelu||method:__mul__||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.common.linear||method:__add__||method:reshape||method:transpose||api:paddle.nn.functional.conv.conv2d||method:__add__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[320, 320], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( 
+ shape=[320, 320], + dtype=paddle.float32, + ) + self.parameter_4 = self.create_parameter( + shape=[768, 320], + dtype=paddle.float32, + ) + self.parameter_5 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_6 = self.create_parameter( + shape=[2560], + dtype=paddle.float32, + ) + self.parameter_7 = self.create_parameter( + shape=[320, 320], + dtype=paddle.float32, + ) + self.parameter_8 = self.create_parameter( + shape=[320, 2560], + dtype=paddle.float32, + ) + self.parameter_9 = self.create_parameter( + shape=[320, 320, 1, 1], + dtype=paddle.float32, + ) + self.parameter_10 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_11 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_12 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_13 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_14 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_15 = self.create_parameter( + shape=[1280, 320], + dtype=paddle.float32, + ) + self.parameter_16 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_17 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_18 = self.create_parameter( + shape=[768, 320], + dtype=paddle.float32, + ) + self.parameter_19 = self.create_parameter( + shape=[320, 320], + dtype=paddle.float32, + ) + self.parameter_20 = self.create_parameter( + shape=[320, 320], + dtype=paddle.float32, + ) + self.parameter_21 = self.create_parameter( + shape=[320, 320, 1, 1], + dtype=paddle.float32, + ) + self.parameter_22 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_23 = self.create_parameter( + shape=[320, 320], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 320, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [], dtype: paddle.int32, stop_gradient: True) + var_2, # (shape: [], dtype: paddle.int32, stop_gradient: True) + var_3, # (shape: [1, 320, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_4, # (shape: [1, 4, 768], dtype: paddle.float32, stop_gradient: True) + ): + var_5 = paddle.nn.functional.conv.conv2d( + var_0, self.parameter_21, self.parameter_17, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5.transpose([0, 2, 3, 1]) + var_7 = var_6.flatten(1, 2) + var_8 = paddle.nn.functional.norm.layer_norm( + var_7, + normalized_shape=[320], + weight=self.parameter_5, + bias=self.parameter_10, + epsilon=1e-05, + ) + var_9 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_7, bias=None, name=None + ) + var_10 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_3, bias=None, name=None + ) + var_11 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_19, bias=None, name=None + ) + var_12 = var_9.reshape([0, 0, 8, 40]) + var_13 = var_12.transpose([0, 2, 1, 3]) + var_14 = var_10.reshape([0, 0, 8, 40]) + var_15 = var_14.transpose([0, 2, 1, 3]) + var_16 = var_11.reshape([0, 0, 8, 40]) + var_17 = var_16.transpose([0, 2, 1, 3]) + var_18 = paddle.tensor.linalg.matmul(var_13, var_15, transpose_y=True) + var_19 = var_18 * 0.15811388300841897 + var_20 = paddle.nn.functional.activation.softmax(var_19, axis=-1) + var_21 = paddle.tensor.linalg.matmul(var_20, var_17) + var_22 = var_21.transpose([0, 2, 1, 3]) + var_23 = var_22.reshape([0, 0, 320]) + var_24 = paddle.nn.functional.common.linear( + x=var_23, 
+ weight=self.parameter_20, + bias=self.parameter_14, + name=None, + ) + var_25 = paddle.nn.functional.common.dropout( + var_24, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_26 = var_25 / 1.0 + var_27 = var_26 + var_7 + var_28 = paddle.nn.functional.norm.layer_norm( + var_27, + normalized_shape=[320], + weight=self.parameter_22, + bias=self.parameter_13, + epsilon=1e-05, + ) + var_29 = paddle.nn.functional.common.linear( + x=var_28, weight=self.parameter_23, bias=None, name=None + ) + var_30 = paddle.nn.functional.common.linear( + x=var_4, weight=self.parameter_4, bias=None, name=None + ) + var_31 = paddle.nn.functional.common.linear( + x=var_4, weight=self.parameter_18, bias=None, name=None + ) + var_32 = var_29.reshape([0, 0, 8, 40]) + var_33 = var_32.transpose([0, 2, 1, 3]) + var_34 = var_30.reshape([0, 0, 8, 40]) + var_35 = var_34.transpose([0, 2, 1, 3]) + var_36 = var_31.reshape([0, 0, 8, 40]) + var_37 = var_36.transpose([0, 2, 1, 3]) + var_38 = paddle.tensor.linalg.matmul(var_33, var_35, transpose_y=True) + var_39 = var_38 * 0.15811388300841897 + var_40 = paddle.nn.functional.activation.softmax(var_39, axis=-1) + var_41 = paddle.tensor.linalg.matmul(var_40, var_37) + var_42 = var_41.transpose([0, 2, 1, 3]) + var_43 = var_42.reshape([0, 0, 320]) + var_44 = paddle.nn.functional.common.linear( + x=var_43, weight=self.parameter_2, bias=self.parameter_0, name=None + ) + var_45 = paddle.nn.functional.common.dropout( + var_44, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_46 = var_45 / 1.0 + var_47 = var_46 + var_27 + var_48 = paddle.nn.functional.norm.layer_norm( + var_47, + normalized_shape=[320], + weight=self.parameter_12, + bias=self.parameter_16, + epsilon=1e-05, + ) + var_49 = paddle.nn.functional.common.linear( + var_48, self.parameter_8, self.parameter_6 + ) + out = var_49.chunk(2, axis=-1) + var_50 = out[0] + var_51 = out[1] + var_52 = paddle.nn.functional.activation.gelu(var_51) + var_53 = var_50 * var_52 + var_54 = paddle.nn.functional.common.dropout( + var_53, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_55 = paddle.nn.functional.common.linear( + var_54, self.parameter_15, self.parameter_1 + ) + var_56 = var_55 + var_47 + var_57 = var_56.reshape([-1, var_1, var_2, 320]) + var_58 = var_57.transpose([0, 3, 1, 2]) + var_59 = paddle.nn.functional.conv.conv2d( + var_58, self.parameter_9, self.parameter_11, [1, 1], 0, [1, 1], 1 + ) + var_60 = var_59 + var_3 + return var_60 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 320, 1, 1], dtype=paddle.float32), + paddle.randint(low=1, high=2, shape=[1], dtype=paddle.int32), + paddle.randint(low=1, high=2, shape=[1], dtype=paddle.int32), + paddle.rand(shape=[1, 320, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 4, 768], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 320, 1, 1]).astype('float32'), + np.random.randint(low=1, high=2, size=[1], dtype='int32'), + np.random.randint(low=1, high=2, size=[1], dtype='int32'), + np.random.random(size=[1, 320, 1, 1]).astype('float32'), + np.random.random(size=[1, 4, 768]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if 
with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=False, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_11_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_11_st.py new file mode 100644 index 0000000000000..ec6d5d1c2b8d3 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_11_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[640, 320, 1, 1], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[640, 640, 3, 3], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 320, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_2, self.parameter_0, [1, 1], 1, [1, 1], 1 + ) + var_5 = paddle.nn.functional.conv.conv2d( + var_1, self.parameter_1, self.parameter_3, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5 + var_4 + var_7 = var_6 / 1.0 + return var_7 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 320, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + np.random.random(size=[1, 320, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def 
train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_12_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_12_st.py new file mode 100644 index 0000000000000..3b137f969f88e --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_12_st.py @@ -0,0 +1,84 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# method:cast||api:paddle.tensor.attribute.shape||method:__getitem__||method:__getitem__||method:__getitem__||method:__getitem__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + var_0, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_1 = var_0.cast('float32') + var_2 = paddle.tensor.attribute.shape(var_1) + var_3 = var_2[0] + var_4 = var_2[1] + var_5 = var_2[2] + var_6 = var_2[3] + return var_1, var_5, var_6 + + +def create_paddle_inputs(): + inputs = (paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32),) + return inputs + + +def create_numpy_inputs(): + inputs = (np.random.random(size=[1, 640, 1, 1]).astype('float32'),) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() 
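
Note on test_sub_graph_stable_diffusion_12_st.py above (and its larger twin, test_sub_graph_stable_diffusion_17_st.py below): these subgraphs exercise symbolic shape inference. paddle.tensor.attribute.shape is the module home of paddle.shape, which returns the shape as a 1-D int32 tensor; indexing it yields 0-D tensors that stay symbolic under paddle.jit.to_static instead of being baked in as Python ints, which is what the symbolic CINN pipeline consumes. The batch and channel lookups (var_3, var_4) are traced but deliberately left unused. A minimal standalone sketch of the same pattern, assuming current public Paddle APIs (this helper is illustrative and not part of the patch):

import paddle

def spatial_dims(x):
    # paddle.shape (defined in paddle.tensor.attribute) returns a 1-D
    # int32 tensor; under to_static its entries remain symbolic dims.
    shape = paddle.shape(x.cast('float32'))
    return shape[2], shape[3]  # H and W as 0-D int32 tensors

net = paddle.jit.to_static(spatial_dims, full_graph=True)
h, w = net(paddle.rand([1, 640, 4, 4], dtype='float32'))
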
diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_13_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_13_st.py new file mode 100644 index 0000000000000..7c738b78a438f --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_13_st.py @@ -0,0 +1,310 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.conv.conv2d||method:transpose||method:flatten||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||method:reshape||method:transpose||method:reshape||method:transpose||method:reshape||method:transpose||api:paddle.tensor.linalg.matmul||method:__mul__||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.linalg.matmul||method:transpose||method:reshape||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.dropout||method:__truediv__||method:__add__||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||method:reshape||method:transpose||method:reshape||method:transpose||method:reshape||method:transpose||api:paddle.tensor.linalg.matmul||method:__mul__||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.linalg.matmul||method:transpose||method:reshape||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.dropout||method:__truediv__||method:__add__||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||method:chunk||api:paddle.nn.functional.activation.gelu||method:__mul__||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.common.linear||method:__add__||method:reshape||method:transpose||api:paddle.nn.functional.conv.conv2d||method:__add__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[640, 640], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[640, 640], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_4 = self.create_parameter( + shape=[640, 5120], + dtype=paddle.float32, + ) + self.parameter_5 = self.create_parameter( + shape=[640, 640, 1, 1], + dtype=paddle.float32, + ) + self.parameter_6 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_7 = self.create_parameter( + shape=[640, 640], + dtype=paddle.float32, + ) + self.parameter_8 = self.create_parameter( + shape=[640, 640, 1, 1], + dtype=paddle.float32, + ) + self.parameter_9 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_10 
= self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_11 = self.create_parameter( + shape=[640, 640], + dtype=paddle.float32, + ) + self.parameter_12 = self.create_parameter( + shape=[640, 640], + dtype=paddle.float32, + ) + self.parameter_13 = self.create_parameter( + shape=[5120], + dtype=paddle.float32, + ) + self.parameter_14 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_15 = self.create_parameter( + shape=[2560, 640], + dtype=paddle.float32, + ) + self.parameter_16 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_17 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_18 = self.create_parameter( + shape=[640, 640], + dtype=paddle.float32, + ) + self.parameter_19 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_20 = self.create_parameter( + shape=[768, 640], + dtype=paddle.float32, + ) + self.parameter_21 = self.create_parameter( + shape=[768, 640], + dtype=paddle.float32, + ) + self.parameter_22 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_23 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [], dtype: paddle.int32, stop_gradient: True) + var_2, # (shape: [], dtype: paddle.int32, stop_gradient: True) + var_3, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_4, # (shape: [1, 4, 768], dtype: paddle.float32, stop_gradient: True) + ): + var_5 = paddle.nn.functional.conv.conv2d( + var_0, self.parameter_8, self.parameter_1, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5.transpose([0, 2, 3, 1]) + var_7 = var_6.flatten(1, 2) + var_8 = paddle.nn.functional.norm.layer_norm( + var_7, + normalized_shape=[640], + weight=self.parameter_17, + bias=self.parameter_16, + epsilon=1e-05, + ) + var_9 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_12, bias=None, name=None + ) + var_10 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_11, bias=None, name=None + ) + var_11 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_2, bias=None, name=None + ) + var_12 = var_9.reshape([0, 0, 8, 80]) + var_13 = var_12.transpose([0, 2, 1, 3]) + var_14 = var_10.reshape([0, 0, 8, 80]) + var_15 = var_14.transpose([0, 2, 1, 3]) + var_16 = var_11.reshape([0, 0, 8, 80]) + var_17 = var_16.transpose([0, 2, 1, 3]) + var_18 = paddle.tensor.linalg.matmul(var_13, var_15, transpose_y=True) + var_19 = var_18 * 0.11180339887498948 + var_20 = paddle.nn.functional.activation.softmax(var_19, axis=-1) + var_21 = paddle.tensor.linalg.matmul(var_20, var_17) + var_22 = var_21.transpose([0, 2, 1, 3]) + var_23 = var_22.reshape([0, 0, 640]) + var_24 = paddle.nn.functional.common.linear( + x=var_23, weight=self.parameter_7, bias=self.parameter_10, name=None + ) + var_25 = paddle.nn.functional.common.dropout( + var_24, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_26 = var_25 / 1.0 + var_27 = var_26 + var_7 + var_28 = paddle.nn.functional.norm.layer_norm( + var_27, + normalized_shape=[640], + weight=self.parameter_9, + bias=self.parameter_3, + epsilon=1e-05, + ) + var_29 = paddle.nn.functional.common.linear( + x=var_28, weight=self.parameter_0, bias=None, name=None + ) + var_30 = paddle.nn.functional.common.linear( + x=var_4, weight=self.parameter_20, bias=None, 
name=None + ) + var_31 = paddle.nn.functional.common.linear( + x=var_4, weight=self.parameter_21, bias=None, name=None + ) + var_32 = var_29.reshape([0, 0, 8, 80]) + var_33 = var_32.transpose([0, 2, 1, 3]) + var_34 = var_30.reshape([0, 0, 8, 80]) + var_35 = var_34.transpose([0, 2, 1, 3]) + var_36 = var_31.reshape([0, 0, 8, 80]) + var_37 = var_36.transpose([0, 2, 1, 3]) + var_38 = paddle.tensor.linalg.matmul(var_33, var_35, transpose_y=True) + var_39 = var_38 * 0.11180339887498948 + var_40 = paddle.nn.functional.activation.softmax(var_39, axis=-1) + var_41 = paddle.tensor.linalg.matmul(var_40, var_37) + var_42 = var_41.transpose([0, 2, 1, 3]) + var_43 = var_42.reshape([0, 0, 640]) + var_44 = paddle.nn.functional.common.linear( + x=var_43, weight=self.parameter_18, bias=self.parameter_6, name=None + ) + var_45 = paddle.nn.functional.common.dropout( + var_44, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_46 = var_45 / 1.0 + var_47 = var_46 + var_27 + var_48 = paddle.nn.functional.norm.layer_norm( + var_47, + normalized_shape=[640], + weight=self.parameter_19, + bias=self.parameter_23, + epsilon=1e-05, + ) + var_49 = paddle.nn.functional.common.linear( + var_48, self.parameter_4, self.parameter_13 + ) + out = var_49.chunk(2, axis=-1) + var_50 = out[0] + var_51 = out[1] + var_52 = paddle.nn.functional.activation.gelu(var_51) + var_53 = var_50 * var_52 + var_54 = paddle.nn.functional.common.dropout( + var_53, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_55 = paddle.nn.functional.common.linear( + var_54, self.parameter_15, self.parameter_14 + ) + var_56 = var_55 + var_47 + var_57 = var_56.reshape([-1, var_1, var_2, 640]) + var_58 = var_57.transpose([0, 3, 1, 2]) + var_59 = paddle.nn.functional.conv.conv2d( + var_58, self.parameter_5, self.parameter_22, [1, 1], 0, [1, 1], 1 + ) + var_60 = var_59 + var_3 + return var_60 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + paddle.randint(low=1, high=2, shape=[1], dtype=paddle.int32), + paddle.randint(low=1, high=2, shape=[1], dtype=paddle.int32), + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 4, 768], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + np.random.randint(low=1, high=2, size=[1], dtype='int32'), + np.random.randint(low=1, high=2, size=[1], dtype='int32'), + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + np.random.random(size=[1, 4, 768]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=False, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), 
atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_14_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_14_st.py new file mode 100644 index 0000000000000..78e38d8f6b4ca --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_14_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.linear||method:__getitem__||method:__add__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[640, 640, 3, 3], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[1280, 640], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 1280], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.conv.conv2d( + var_2, self.parameter_0, self.parameter_1, [1, 1], 1, [1, 1], 1 + ) + var_4 = paddle.nn.functional.activation.silu(var_1, None) + var_5 = paddle.nn.functional.common.linear( + var_4, self.parameter_2, self.parameter_3 + ) + var_6 = var_5[ + ( + slice(None, None, None), + slice(None, None, None), + None, + None, + ) + ] + var_7 = var_3 + var_6 + return var_7, var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 1280], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + np.random.random(size=[1, 1280]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + 
paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_15_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_15_st.py new file mode 100644 index 0000000000000..3296ab20fcb6e --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_15_st.py @@ -0,0 +1,107 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[640, 640, 3, 3], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_0, self.parameter_1, [1, 1], 1, [1, 1], 1 + ) + var_5 = var_1 + var_4 + var_6 = var_5 / 1.0 + return var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=True + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_16_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_16_st.py new file mode 100644 index 0000000000000..07842bda1f444 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_16_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[1280, 1280, 3, 3], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[1280, 640, 1, 1], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_2, self.parameter_0, [1, 1], 1, [1, 1], 1 + ) + var_5 = paddle.nn.functional.conv.conv2d( + var_1, self.parameter_3, self.parameter_1, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5 + var_4 + var_7 = var_6 / 1.0 + return var_7 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), 
paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_17_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_17_st.py new file mode 100644 index 0000000000000..c7945200d2394 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_17_st.py @@ -0,0 +1,84 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# method:cast||api:paddle.tensor.attribute.shape||method:__getitem__||method:__getitem__||method:__getitem__||method:__getitem__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + var_0, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_1 = var_0.cast('float32') + var_2 = paddle.tensor.attribute.shape(var_1) + var_3 = var_2[0] + var_4 = var_2[1] + var_5 = var_2[2] + var_6 = var_2[3] + return var_1, var_5, var_6 + + +def create_paddle_inputs(): + inputs = (paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32),) + return inputs + + +def create_numpy_inputs(): + inputs = (np.random.random(size=[1, 1280, 1, 1]).astype('float32'),) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_18_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_18_st.py new file mode 100644 index 0000000000000..eb4869b7ca459 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_18_st.py @@ -0,0 +1,310 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.conv.conv2d||method:transpose||method:flatten||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||method:reshape||method:transpose||method:reshape||method:transpose||method:reshape||method:transpose||api:paddle.tensor.linalg.matmul||method:__mul__||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.linalg.matmul||method:transpose||method:reshape||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.dropout||method:__truediv__||method:__add__||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||method:reshape||method:transpose||method:reshape||method:transpose||method:reshape||method:transpose||api:paddle.tensor.linalg.matmul||method:__mul__||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.linalg.matmul||method:transpose||method:reshape||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.dropout||method:__truediv__||method:__add__||api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||method:chunk||api:paddle.nn.functional.activation.gelu||method:__mul__||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.common.linear||method:__add__||method:reshape||method:transpose||api:paddle.nn.functional.conv.conv2d||method:__add__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_4 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_5 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_6 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_7 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_8 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_9 = self.create_parameter( + shape=[10240], + dtype=paddle.float32, + ) + self.parameter_10 = self.create_parameter( + shape=[1280, 1280, 1, 1], + dtype=paddle.float32, + ) + self.parameter_11 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_12 = self.create_parameter( + shape=[1280, 1280, 1, 1], + dtype=paddle.float32, + ) + self.parameter_13 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_14 = self.create_parameter( + shape=[5120, 1280], + dtype=paddle.float32, + ) + self.parameter_15 = self.create_parameter( + shape=[768, 1280], 
+ dtype=paddle.float32, + ) + self.parameter_16 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_17 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_18 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_19 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_20 = self.create_parameter( + shape=[768, 1280], + dtype=paddle.float32, + ) + self.parameter_21 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_22 = self.create_parameter( + shape=[1280, 10240], + dtype=paddle.float32, + ) + self.parameter_23 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [], dtype: paddle.int32, stop_gradient: True) + var_2, # (shape: [], dtype: paddle.int32, stop_gradient: True) + var_3, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_4, # (shape: [1, 4, 768], dtype: paddle.float32, stop_gradient: True) + ): + var_5 = paddle.nn.functional.conv.conv2d( + var_0, self.parameter_10, self.parameter_19, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5.transpose([0, 2, 3, 1]) + var_7 = var_6.flatten(1, 2) + var_8 = paddle.nn.functional.norm.layer_norm( + var_7, + normalized_shape=[1280], + weight=self.parameter_1, + bias=self.parameter_16, + epsilon=1e-05, + ) + var_9 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_5, bias=None, name=None + ) + var_10 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_6, bias=None, name=None + ) + var_11 = paddle.nn.functional.common.linear( + x=var_8, weight=self.parameter_17, bias=None, name=None + ) + var_12 = var_9.reshape([0, 0, 8, 160]) + var_13 = var_12.transpose([0, 2, 1, 3]) + var_14 = var_10.reshape([0, 0, 8, 160]) + var_15 = var_14.transpose([0, 2, 1, 3]) + var_16 = var_11.reshape([0, 0, 8, 160]) + var_17 = var_16.transpose([0, 2, 1, 3]) + var_18 = paddle.tensor.linalg.matmul(var_13, var_15, transpose_y=True) + var_19 = var_18 * 0.07905694150420949 + var_20 = paddle.nn.functional.activation.softmax(var_19, axis=-1) + var_21 = paddle.tensor.linalg.matmul(var_20, var_17) + var_22 = var_21.transpose([0, 2, 1, 3]) + var_23 = var_22.reshape([0, 0, 1280]) + var_24 = paddle.nn.functional.common.linear( + x=var_23, weight=self.parameter_13, bias=self.parameter_3, name=None + ) + var_25 = paddle.nn.functional.common.dropout( + var_24, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_26 = var_25 / 1.0 + var_27 = var_26 + var_7 + var_28 = paddle.nn.functional.norm.layer_norm( + var_27, + normalized_shape=[1280], + weight=self.parameter_11, + bias=self.parameter_21, + epsilon=1e-05, + ) + var_29 = paddle.nn.functional.common.linear( + x=var_28, weight=self.parameter_18, bias=None, name=None + ) + var_30 = paddle.nn.functional.common.linear( + x=var_4, weight=self.parameter_15, bias=None, name=None + ) + var_31 = paddle.nn.functional.common.linear( + x=var_4, weight=self.parameter_20, bias=None, name=None + ) + var_32 = var_29.reshape([0, 0, 8, 160]) + var_33 = var_32.transpose([0, 2, 1, 3]) + var_34 = var_30.reshape([0, 0, 8, 160]) + var_35 = var_34.transpose([0, 2, 1, 3]) + var_36 = var_31.reshape([0, 0, 8, 160]) + var_37 = var_36.transpose([0, 2, 1, 3]) + var_38 = paddle.tensor.linalg.matmul(var_33, var_35, transpose_y=True) + var_39 = var_38 * 
0.07905694150420949 + var_40 = paddle.nn.functional.activation.softmax(var_39, axis=-1) + var_41 = paddle.tensor.linalg.matmul(var_40, var_37) + var_42 = var_41.transpose([0, 2, 1, 3]) + var_43 = var_42.reshape([0, 0, 1280]) + var_44 = paddle.nn.functional.common.linear( + x=var_43, weight=self.parameter_0, bias=self.parameter_23, name=None + ) + var_45 = paddle.nn.functional.common.dropout( + var_44, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_46 = var_45 / 1.0 + var_47 = var_46 + var_27 + var_48 = paddle.nn.functional.norm.layer_norm( + var_47, + normalized_shape=[1280], + weight=self.parameter_7, + bias=self.parameter_8, + epsilon=1e-05, + ) + var_49 = paddle.nn.functional.common.linear( + var_48, self.parameter_22, self.parameter_9 + ) + out = var_49.chunk(2, axis=-1) + var_50 = out[0] + var_51 = out[1] + var_52 = paddle.nn.functional.activation.gelu(var_51) + var_53 = var_50 * var_52 + var_54 = paddle.nn.functional.common.dropout( + var_53, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_55 = paddle.nn.functional.common.linear( + var_54, self.parameter_14, self.parameter_2 + ) + var_56 = var_55 + var_47 + var_57 = var_56.reshape([-1, var_1, var_2, 1280]) + var_58 = var_57.transpose([0, 3, 1, 2]) + var_59 = paddle.nn.functional.conv.conv2d( + var_58, self.parameter_12, self.parameter_4, [1, 1], 0, [1, 1], 1 + ) + var_60 = var_59 + var_3 + return var_60 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + paddle.randint(low=1, high=2, shape=[1], dtype=paddle.int32), + paddle.randint(low=1, high=2, shape=[1], dtype=paddle.int32), + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 4, 768], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + np.random.randint(low=1, high=2, size=[1], dtype='int32'), + np.random.randint(low=1, high=2, size=[1], dtype='int32'), + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + np.random.random(size=[1, 4, 768]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=False, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_19_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_19_st.py new file mode 100644 index 0000000000000..ca67cb5d09ba6 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_19_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.linear||method:__getitem__||method:__add__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280, 1280, 3, 3], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 1280], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.conv.conv2d( + var_2, self.parameter_1, self.parameter_3, [1, 1], 1, [1, 1], 1 + ) + var_4 = paddle.nn.functional.activation.silu(var_1, None) + var_5 = paddle.nn.functional.common.linear( + var_4, self.parameter_2, self.parameter_0 + ) + var_6 = var_5[ + ( + slice(None, None, None), + slice(None, None, None), + None, + None, + ) + ] + var_7 = var_3 + var_6 + return var_7, var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 1280], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + np.random.random(size=[1, 1280]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_1_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_1_st.py new file mode 100644 index 0000000000000..d88a21ca6884b --- /dev/null +++ 
b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_1_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[512], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[512, 512, 3, 3], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[512, 256, 1, 1], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[512], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 512, 2, 2], dtype: paddle.float32, stop_gradient: True) + var_1, # (shape: [1, 256, 2, 2], dtype: paddle.float32, stop_gradient: True) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=False, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_1, self.parameter_3, [1, 1], 1, [1, 1], 1 + ) + var_5 = paddle.nn.functional.conv.conv2d( + var_1, self.parameter_2, self.parameter_0, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5 + var_4 + var_7 = var_6 / 1.0 + return var_7 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 512, 2, 2], dtype=paddle.float32), + paddle.rand(shape=[1, 256, 2, 2], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 512, 2, 2]).astype('float32'), + np.random.random(size=[1, 256, 2, 2]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_20_st.py 
b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_20_st.py new file mode 100644 index 0000000000000..2bb3cd1184813 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_20_st.py @@ -0,0 +1,107 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280, 1280, 3, 3], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_0, self.parameter_1, [1, 1], 1, [1, 1], 1 + ) + var_5 = var_1 + var_4 + var_6 = var_5 / 1.0 + return var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=True + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_21_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_21_st.py new file mode 100644 index 0000000000000..91732ee2cd266 --- /dev/null +++ 
b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_21_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.linear||method:__getitem__||method:__add__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[1280, 2560, 3, 3], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 2560, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 1280], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.conv.conv2d( + var_2, self.parameter_2, self.parameter_0, [1, 1], 1, [1, 1], 1 + ) + var_4 = paddle.nn.functional.activation.silu(var_1, None) + var_5 = paddle.nn.functional.common.linear( + var_4, self.parameter_3, self.parameter_1 + ) + var_6 = var_5[ + ( + slice(None, None, None), + slice(None, None, None), + None, + None, + ) + ] + var_7 = var_3 + var_6 + return var_7, var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 2560, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 1280], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 2560, 1, 1]).astype('float32'), + np.random.random(size=[1, 1280]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_22_st.py 
b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_22_st.py new file mode 100644 index 0000000000000..b9415782e0ed5 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_22_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280, 2560, 1, 1], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[1280, 1280, 3, 3], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 2560, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_3, self.parameter_2, [1, 1], 1, [1, 1], 1 + ) + var_5 = paddle.nn.functional.conv.conv2d( + var_1, self.parameter_0, self.parameter_1, [1, 1], 0, [1, 1], 1 + ) + var_6 = var_5 + var_4 + var_7 = var_6 / 1.0 + return var_7 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 2560, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + np.random.random(size=[1, 2560, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), 
cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_23_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_23_st.py new file mode 100644 index 0000000000000..876066b9ac10c --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_23_st.py @@ -0,0 +1,99 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.common.interpolate||api:paddle.nn.functional.conv.conv2d +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280, 1280, 3, 3], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 1280, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [2], dtype: paddle.int32, stop_gradient: True) + ): + var_2 = paddle.nn.functional.common.interpolate( + var_0, size=var_1, mode='nearest' + ) + var_3 = paddle.nn.functional.conv.conv2d( + var_2, self.parameter_0, self.parameter_1, [1, 1], 1, [1, 1], 1 + ) + return var_3 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 1280, 1, 1], dtype=paddle.float32), + paddle.randint(low=0, high=10, shape=[2], dtype=paddle.int32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 1280, 1, 1]).astype('float32'), + np.random.randint(low=0, high=10, size=[2], dtype='int32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_24_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_24_st.py new file mode 100644 index 0000000000000..20d7dab638ad6 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_24_st.py @@ -0,0 +1,99 @@ +# Copyright (c) 2024 PaddlePaddle Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.common.interpolate||api:paddle.nn.functional.conv.conv2d +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[640, 640, 3, 3], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[640], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 640, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [2], dtype: paddle.int32, stop_gradient: True) + ): + var_2 = paddle.nn.functional.common.interpolate( + var_0, size=var_1, mode='nearest' + ) + var_3 = paddle.nn.functional.conv.conv2d( + var_2, self.parameter_0, self.parameter_1, [1, 1], 1, [1, 1], 1 + ) + return var_3 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 640, 1, 1], dtype=paddle.float32), + paddle.randint(low=0, high=10, shape=[2], dtype=paddle.int32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 640, 1, 1]).astype('float32'), + np.random.randint(low=0, high=10, size=[2], dtype='int32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_25_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_25_st.py new file mode 100644 index 0000000000000..2a5536496675a --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_25_st.py @@ -0,0 +1,92 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# method:cast||method:cast||api:paddle.nn.functional.loss.mse_loss||method:mean||method:mean +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + var_0, # (shape: [1, 4, 1, 1], dtype: paddle.float32, stop_gradient: True) + var_1, # (shape: [1, 4, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = var_1.cast('float32') + var_3 = var_0.cast('float32') + var_4 = paddle.nn.functional.loss.mse_loss( + var_2, var_3, reduction='none' + ) + var_5 = var_4.mean([1, 2, 3]) + var_6 = var_5.mean() + return var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 4, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 4, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 4, 1, 1]).astype('float32'), + np.random.random(size=[1, 4, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_2_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_2_st.py new file mode 100644 index 0000000000000..1a5570de3874f --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_2_st.py @@ -0,0 +1,156 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
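[Note on the file below.] test_sub_graph_stable_diffusion_2_st.py captures a single-head self-attention block over a [1, 512, 1] feature map; the constant 0.04419417382415922 in its forward pass is just 512 ** -0.5, the usual 1/sqrt(d) attention scaling. A minimal sketch of the core computation, with the learned q/k/v and output projections replaced by identity for brevity (an illustration of the pattern, not the test's exact graph):

import paddle

x = paddle.rand([1, 1, 512])  # [batch, seq_len, channels], matching the test's transposed input
q = k = v = x                 # stand-ins for the learned linear projections
scores = paddle.matmul(q, k, transpose_y=True) * 512**-0.5  # == 0.04419417382415922
weights = paddle.nn.functional.softmax(scores, axis=-1)
out = paddle.matmul(weights, v)  # the test then adds an output projection, dropout, and a residual add
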
+ +# repo: diffusers_sub_grpah +# model: stable_diffusion +# method:transpose||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.linear||method:reshape||method:transpose||method:reshape||method:transpose||method:reshape||method:transpose||api:paddle.tensor.linalg.matmul||method:__mul__||method:cast||api:paddle.nn.functional.activation.softmax||method:cast||api:paddle.tensor.linalg.matmul||method:transpose||method:reshape||api:paddle.nn.functional.common.linear||api:paddle.nn.functional.common.dropout||method:transpose||method:reshape||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[512, 512], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[512], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[512, 512], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[512, 512], + dtype=paddle.float32, + ) + self.parameter_4 = self.create_parameter( + shape=[512], + dtype=paddle.float32, + ) + self.parameter_5 = self.create_parameter( + shape=[512, 512], + dtype=paddle.float32, + ) + self.parameter_6 = self.create_parameter( + shape=[512], + dtype=paddle.float32, + ) + self.parameter_7 = self.create_parameter( + shape=[512], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 512, 1], dtype: paddle.float32, stop_gradient: True) + var_1, # (shape: [1, 512, 1, 1], dtype: paddle.float32, stop_gradient: True) + ): + var_2 = var_0.transpose([0, 2, 1]) + var_3 = paddle.nn.functional.common.linear( + x=var_2, weight=self.parameter_0, bias=self.parameter_6, name=None + ) + var_4 = paddle.nn.functional.common.linear( + x=var_2, weight=self.parameter_2, bias=self.parameter_1, name=None + ) + var_5 = paddle.nn.functional.common.linear( + x=var_2, weight=self.parameter_5, bias=self.parameter_4, name=None + ) + var_6 = var_3.reshape([0, 0, 1, 512]) + var_7 = var_6.transpose([0, 2, 1, 3]) + var_8 = var_4.reshape([0, 0, 1, 512]) + var_9 = var_8.transpose([0, 2, 1, 3]) + var_10 = var_5.reshape([0, 0, 1, 512]) + var_11 = var_10.transpose([0, 2, 1, 3]) + var_12 = paddle.tensor.linalg.matmul(var_7, var_9, transpose_y=True) + var_13 = var_12 * 0.04419417382415922 + var_14 = var_13.cast('float32') + var_15 = paddle.nn.functional.activation.softmax(var_14, axis=-1) + var_16 = var_15.cast('float32') + var_17 = paddle.tensor.linalg.matmul(var_16, var_11) + var_18 = var_17.transpose([0, 2, 1, 3]) + var_19 = var_18.reshape([0, 0, 512]) + var_20 = paddle.nn.functional.common.linear( + x=var_19, weight=self.parameter_3, bias=self.parameter_7, name=None + ) + var_21 = paddle.nn.functional.common.dropout( + var_20, + p=0.0, + axis=None, + training=False, + mode='upscale_in_train', + name=None, + ) + var_22 = var_21.transpose([0, 2, 1]) + var_23 = var_22.reshape([1, 512, 1, 1]) + var_24 = var_23 + var_1 + var_25 = var_24 / 1 + return var_25 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 512, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 512, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 512, 1]).astype('float32'), + np.random.random(size=[1, 512, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = 
create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_3_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_3_st.py new file mode 100644 index 0000000000000..ababaf8eddebb --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_3_st.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
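[Note on the file below.] test_sub_graph_stable_diffusion_3_st.py is the mean/log-variance split that diffusers-style VAEs apply to the encoder output (a DiagonalGaussianDistribution-like pattern; that attribution is inferred from the op sequence, not stated in the patch). The clip bounds the log-variance so the exponentials stay finite. An equivalent standalone sketch:

import paddle

params = paddle.rand([1, 8, 1, 1])              # concatenated moments, as in the test input
mean, logvar = paddle.chunk(params, 2, axis=1)  # two [1, 4, 1, 1] halves
logvar = paddle.clip(logvar, -30.0, 20.0)       # keep exp() numerically safe
std = paddle.exp(0.5 * logvar)                  # standard deviation
var = paddle.exp(logvar)                        # variance
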
+ +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.tensor.manipulation.chunk||api:paddle.tensor.math.clip||method:__rmul__||api:paddle.tensor.ops.exp||api:paddle.tensor.ops.exp +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + var_0, # (shape: [1, 8, 1, 1], dtype: paddle.float32, stop_gradient: True) + ): + out = paddle.tensor.manipulation.chunk(var_0, 2, axis=1) + var_1 = out[0] + var_2 = out[1] + var_3 = paddle.tensor.math.clip(var_2, -30.0, 20.0) + var_4 = 0.5 * var_3 + var_5 = paddle.tensor.ops.exp(var_4) + var_6 = paddle.tensor.ops.exp(var_3) + return var_1, var_2, var_3, var_5, var_6 + + +def create_paddle_inputs(): + inputs = (paddle.rand(shape=[1, 8, 1, 1], dtype=paddle.float32),) + return inputs + + +def create_numpy_inputs(): + inputs = (np.random.random(size=[1, 8, 1, 1]).astype('float32'),) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=True + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_4_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_4_st.py new file mode 100644 index 0000000000000..e1f5c387c2e53 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_4_st.py @@ -0,0 +1,114 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
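[Note on the file below.] test_sub_graph_stable_diffusion_4_st.py looks like the DDPM add-noise (q_sample) step of stable-diffusion training: scale the latents by 0.18215, draw a random timestep, then mix latents and Gaussian noise weighted by sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t). A hedged sketch using plain paddle ops in place of the ppdiffusers randn_pt/randint_pt helpers; reading the [1000] input as a cumulative-alpha schedule is an assumption:

import paddle

alphas_cumprod = paddle.rand([1000])           # stand-in schedule; the test feeds a [1000] float input
latents = paddle.rand([1, 4, 1, 1]) * 0.18215  # SD latent scaling factor, as in the test
noise = paddle.randn([1, 4, 1, 1])
t = paddle.randint(0, 1000, [1])               # random training timestep
a = (alphas_cumprod[t] ** 0.5).reshape([-1, 1, 1, 1])        # sqrt(alpha_bar_t)
b = ((1 - alphas_cumprod[t]) ** 0.5).reshape([-1, 1, 1, 1])  # sqrt(1 - alpha_bar_t)
noisy = a * latents + b * noise                # the noisy sample the UNet learns to denoise
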
+ +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:ppdiffusers.utils.paddle_utils.randn_pt||method:__mul__||method:__add__||method:__mul__||api:ppdiffusers.utils.paddle_utils.randn_pt||api:ppdiffusers.utils.paddle_utils.randint_pt||method:cast||method:__getitem__||method:__pow__||method:flatten||method:unsqueeze||method:unsqueeze||method:unsqueeze||method:__getitem__||method:__rsub__||method:__pow__||method:flatten||method:unsqueeze||method:unsqueeze||method:unsqueeze||method:__mul__||method:__mul__||method:__add__ +import unittest + +import numpy as np +import ppdiffusers + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + var_0, # (shape: [1, 4, 1, 1], dtype: paddle.float32, stop_gradient: True) + var_1, # (shape: [1, 4, 1, 1], dtype: paddle.float32, stop_gradient: True) + var_2, # (shape: [1000], dtype: paddle.float32, stop_gradient: True) + ): + var_3 = ppdiffusers.utils.paddle_utils.randn_pt( + [1, 4, 1, 1], generator=None, dtype='float32' + ) + var_4 = var_1 * var_3 + var_5 = var_0 + var_4 + var_6 = var_5 * 0.18215 + var_7 = ppdiffusers.utils.paddle_utils.randn_pt([1, 4, 1, 1]) + var_8 = ppdiffusers.utils.paddle_utils.randint_pt(0, 1000, (1,)) + var_9 = var_8.cast('int64') + var_10 = var_2[var_9] + var_11 = var_10**0.5 + var_12 = var_11.flatten() + var_13 = var_12.unsqueeze(-1) + var_14 = var_13.unsqueeze(-1) + var_15 = var_14.unsqueeze(-1) + var_16 = var_2[var_9] + var_17 = 1 - var_16 + var_18 = var_17**0.5 + var_19 = var_18.flatten() + var_20 = var_19.unsqueeze(-1) + var_21 = var_20.unsqueeze(-1) + var_22 = var_21.unsqueeze(-1) + var_23 = var_15 * var_6 + var_24 = var_22 * var_7 + var_25 = var_23 + var_24 + return var_25, var_9, var_6, var_7 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 4, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 4, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1000], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 4, 1, 1]).astype('float32'), + np.random.random(size=[1, 4, 1, 1]).astype('float32'), + np.random.random(size=[1000]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_5_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_5_st.py new file mode 100644 index 0000000000000..e9a3ae24f284f --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_5_st.py @@ -0,0 +1,113 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.tensor.creation.arange||method:__rmul__||method:__truediv__||api:paddle.tensor.ops.exp||method:__getitem__||method:cast||method:__getitem__||method:__mul__||method:__rmul__||api:paddle.tensor.ops.sin||api:paddle.tensor.ops.cos||api:paddle.tensor.manipulation.concat||method:__getitem__||method:__getitem__||api:paddle.tensor.manipulation.concat +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + var_0, # (shape: [1], dtype: paddle.int64, stop_gradient: True) + ): + var_1 = paddle.tensor.creation.arange(start=0, end=160, dtype='float32') + var_2 = -9.210340371976184 * var_1 + var_3 = var_2 / 160 + var_4 = paddle.tensor.ops.exp(var_3) + var_5 = var_0[ + ( + slice(None, None, None), + None, + ) + ] + var_6 = var_5.cast('float32') + var_7 = var_4[ + ( + None, + slice(None, None, None), + ) + ] + var_8 = var_6 * var_7 + var_9 = 1 * var_8 + var_10 = paddle.tensor.ops.sin(var_9) + var_11 = paddle.tensor.ops.cos(var_9) + var_12 = paddle.tensor.manipulation.concat([var_10, var_11], axis=-1) + var_13 = var_12[ + ( + slice(None, None, None), + slice(160, None, None), + ) + ] + var_14 = var_12[ + ( + slice(None, None, None), + slice(None, 160, None), + ) + ] + var_15 = paddle.tensor.manipulation.concat([var_13, var_14], axis=-1) + return var_15 + + +def create_paddle_inputs(): + inputs = (paddle.randint(low=0, high=10, shape=[1], dtype=paddle.int64),) + return inputs + + +def create_numpy_inputs(): + inputs = (np.random.randint(low=0, high=10, size=[1], dtype='int64'),) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_6_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_6_st.py new file mode 100644 index 0000000000000..817e19aab0412 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_6_st.py @@ -0,0 +1,101 @@ +# Copyright (c) 2024 PaddlePaddle Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.common.linear||api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.linear +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[1280, 1280], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[320, 1280], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[1280], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 320], dtype: paddle.float32, stop_gradient: True) + ): + var_1 = paddle.nn.functional.common.linear( + x=var_0, weight=self.parameter_2, bias=self.parameter_0, name=None + ) + var_2 = paddle.nn.functional.activation.silu(var_1, None) + var_3 = paddle.nn.functional.common.linear( + x=var_2, weight=self.parameter_1, bias=self.parameter_3, name=None + ) + return var_3 + + +def create_paddle_inputs(): + inputs = (paddle.rand(shape=[1, 320], dtype=paddle.float32),) + return inputs + + +def create_numpy_inputs(): + inputs = (np.random.random(size=[1, 320]).astype('float32'),) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_7_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_7_st.py new file mode 100644 index 0000000000000..7c14c468c4b42 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_7_st.py @@ -0,0 +1,118 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.conv.conv2d||api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.linear||method:__getitem__||method:__add__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[320, 320, 3, 3], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_2 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_3 = self.create_parameter( + shape=[1280, 320], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 320, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 1280], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.conv.conv2d( + var_2, self.parameter_0, self.parameter_2, [1, 1], 1, [1, 1], 1 + ) + var_4 = paddle.nn.functional.activation.silu(var_1, None) + var_5 = paddle.nn.functional.common.linear( + var_4, self.parameter_3, self.parameter_1 + ) + var_6 = var_5[ + ( + slice(None, None, None), + slice(None, None, None), + None, + None, + ) + ] + var_7 = var_3 + var_6 + return var_7, var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 320, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 1280], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 320, 1, 1]).astype('float32'), + np.random.random(size=[1, 1280]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_8_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_8_st.py new file mode 100644 index 0000000000000..fe44a18a7cd90 --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_8_st.py @@ -0,0 +1,107 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# api:paddle.nn.functional.activation.silu||api:paddle.nn.functional.common.dropout||api:paddle.nn.functional.conv.conv2d||method:__add__||method:__truediv__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + self.parameter_0 = self.create_parameter( + shape=[320], + dtype=paddle.float32, + ) + self.parameter_1 = self.create_parameter( + shape=[320, 320, 3, 3], + dtype=paddle.float32, + ) + + def forward( + self, + var_0, # (shape: [1, 320, 1, 1], dtype: paddle.float32, stop_gradient: False) + var_1, # (shape: [1, 320, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_2 = paddle.nn.functional.activation.silu(var_0, None) + var_3 = paddle.nn.functional.common.dropout( + var_2, + p=0.0, + axis=None, + training=True, + mode='upscale_in_train', + name=None, + ) + var_4 = paddle.nn.functional.conv.conv2d( + var_3, self.parameter_1, self.parameter_0, [1, 1], 1, [1, 1], 1 + ) + var_5 = var_1 + var_4 + var_6 = var_5 / 1.0 + return var_6 + + +def create_paddle_inputs(): + inputs = ( + paddle.rand(shape=[1, 320, 1, 1], dtype=paddle.float32), + paddle.rand(shape=[1, 320, 1, 1], dtype=paddle.float32), + ) + return inputs + + +def create_numpy_inputs(): + inputs = ( + np.random.random(size=[1, 320, 1, 1]).astype('float32'), + np.random.random(size=[1, 320, 1, 1]).astype('float32'), + ) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=True + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_9_st.py b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_9_st.py new file mode 100644 index 0000000000000..ec3de44049f7d --- /dev/null +++ b/test/ir/pir/cinn/symbolic/test_sub_graph_stable_diffusion_9_st.py @@ -0,0 +1,84 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# repo: diffusers_sub_grpah +# model: stable_diffusion +# method:cast||api:paddle.tensor.attribute.shape||method:__getitem__||method:__getitem__||method:__getitem__||method:__getitem__ +import unittest + +import numpy as np + +import paddle + + +class LayerCase(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + var_0, # (shape: [1, 320, 1, 1], dtype: paddle.float32, stop_gradient: False) + ): + var_1 = var_0.cast('float32') + var_2 = paddle.tensor.attribute.shape(var_1) + var_3 = var_2[0] + var_4 = var_2[1] + var_5 = var_2[2] + var_6 = var_2[3] + return var_1, var_5, var_6 + + +def create_paddle_inputs(): + inputs = (paddle.rand(shape=[1, 320, 1, 1], dtype=paddle.float32),) + return inputs + + +def create_numpy_inputs(): + inputs = (np.random.random(size=[1, 320, 1, 1]).astype('float32'),) + return inputs + + +class TestLayer(unittest.TestCase): + def setUp(self): + self.inputs = create_paddle_inputs() + self.net = LayerCase() + + def train(self, net, to_static, with_prim=False, with_cinn=False): + if to_static: + paddle.set_flags({'FLAGS_prim_all': with_prim}) + if with_cinn: + build_strategy = paddle.static.BuildStrategy() + build_strategy.build_cinn_pass = True + net = paddle.jit.to_static( + net, build_strategy=build_strategy, full_graph=True + ) + else: + net = paddle.jit.to_static(net, full_graph=True) + paddle.seed(123) + outs = net(*self.inputs) + return outs + + def test_ast_prim_cinn(self): + st_out = self.train(self.net, to_static=True) + cinn_out = self.train( + self.net, to_static=True, with_prim=True, with_cinn=False + ) + for st, cinn in zip( + paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out) + ): + np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8) + + +if __name__ == '__main__': + unittest.main()
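
[Note on the patch as a whole.] Every file added here shares one harness: LayerCase reproduces a single extracted subgraph, create_paddle_inputs() builds fixed-shape random inputs, and TestLayer.train() runs the layer under paddle.jit.to_static, optionally with primitive-op lowering (FLAGS_prim_all) and the CINN build pass, comparing the flattened outputs at atol=1e-8. A condensed sketch of that pattern, folding the tests' two to_static branches into one (a simplification, not the exact control flow above):

import numpy as np
import paddle

def run(net, inputs, with_prim=False, with_cinn=False):
    paddle.set_flags({'FLAGS_prim_all': with_prim})
    build_strategy = paddle.static.BuildStrategy()
    build_strategy.build_cinn_pass = with_cinn
    static_net = paddle.jit.to_static(
        net, build_strategy=build_strategy, full_graph=True
    )
    paddle.seed(123)  # fixed seed so random ops match across the two runs
    return static_net(*inputs)

# Usage against any of the cases above, e.g.:
#   net, inputs = LayerCase(), create_paddle_inputs()
#   baseline = run(net, inputs)
#   candidate = run(net, inputs, with_prim=True)
#   for a, b in zip(paddle.utils.flatten(baseline), paddle.utils.flatten(candidate)):
#       np.testing.assert_allclose(a.numpy(), b.numpy(), atol=1e-8)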