Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【Hackathon 33 曙光算子修复】PR提交 #41624

Closed
wants to merge 82 commits into from
Closed

Conversation

ImNoBadBoy
Copy link

@ImNoBadBoy ImNoBadBoy commented Apr 11, 2022

PR types

【Hackathon 33 曙光算子修复】PR提交
#40300

PR changes

修改了四个文件

  • test_trunc_op.py
  • test_where_op.py
  • test_yolo_box_op.py
  • test_zeros_like_op.py

Describe

修改内容

  • test_trunc_op.py
vim /public/home/acv2h3zdq0/Paddle/build/python/paddle/fluid/tests/unittests/test_trunc_op.py
42         self.check_output()
45         self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5)
  • test_where_op.py
vim /public/home/acv2h3zdq0/Paddle/build/python/paddle/fluid/tests/unittests/test_where_op.py
251             assert np.array_equal(out.cpu().numpy(), np.where(cond_i, x_i, y_i)) 
269             result = result.cpu().numpy()
270             expect = np.where(cond.cpu(), a.cpu(), b.cpu())
  • test_yolo_box_op.py
没有修改直接测试通过
  • test_zeros_like_op.py
 vim /public/home/acv2h3zdq0/Paddle/build/python/paddle/fluid/tests/unittests/test_zeros_like_op.py

73             if core.is_compiled_with_cuda():
 74                 self.assertEqual((out.cpu().numpy() == np.zeros(shape, dtype)).all(),True)
 75             else:
 76                 self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),True)
 

78         if core.is_compiled_with_cuda():
 79             self.assertEqual((out.cpu().numpy() == np.zeros(shape, dtype)).all(), True)
 80         else:
 81             self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)


83         if core.is_compiled_with_cuda():
 84             self.assertEqual((out.cpu().numpy() == np.zeros(shape, dtype)).all(), True)
 85         else:
 86             self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
 87         paddle.enable_static()
  • 代码
  • test_trunc_op.py
#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()


class TestTruncOp(OpTest):
    def setUp(self):
        self.op_type = "trunc"
        self.python_api = paddle.trunc
        self.dtype = np.float64
        np.random.seed(2021)
        self.inputs = {'X': np.random.random((20, 20)).astype(self.dtype)}
        self.outputs = {'Out': (np.trunc(self.inputs['X']))}
    
    def init_dtype_type(self):
        self.dtype = np.float64

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5)


class TestFloatTruncOp(TestTruncOp):
    def init_dtype_type(self):
        self.dtype = np.float32


class TestIntTruncOp(TestTruncOp):
    def init_dtype_type(self):
        self.dtype = np.int32


class TestTruncAPI(unittest.TestCase):
    def setUp(self):
        self.shape = [20, 20]
        self.x = np.random.random((20, 20)).astype(np.float32)
        self.place = paddle.CPUPlace()

    def test_api_static(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.shape)
            out = paddle.trunc(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x}, fetch_list=[out])
        out_ref = np.trunc(self.x)
        for out in res:
            self.assertEqual(np.allclose(out, out_ref, rtol=1e-08), True)

    def test_api_dygraph(self):

        paddle.disable_static(self.place)
        x_tensor = paddle.to_tensor(self.x)
        out = paddle.trunc(x_tensor)
        out_ref = np.trunc(self.x)
        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True)
        paddle.enable_static()

    def test_api_eager(self):
        paddle.disable_static(self.place)

        with _test_eager_guard():
            x_tensor = paddle.to_tensor(self.x)
            out = paddle.trunc(x_tensor)
        out_ref = np.trunc(self.x)
        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True)
        paddle.enable_static()

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_api_dygraph()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [20, 20], 'bool')
            self.assertRaises(TypeError, paddle.trunc, x)


if __name__ == "__main__":
    unittest.main()
from __future__ import print_function



import unittest

import numpy as np

from op_test import OpTest

import paddle

import paddle.fluid.core as core

import paddle.fluid as fluid

from paddle.fluid import Program, program_guard

from paddle.fluid.framework import _test_eager_guard



paddle.enable_static()





class TestTruncOp(OpTest):

    def setUp(self):

        self.op_type = "trunc"

        self.python_api = paddle.trunc

        self.dtype = np.float64

        np.random.seed(2021)

        self.inputs = {'X': np.random.random((20, 20)).astype(self.dtype)}

        self.outputs = {'Out': (np.trunc(self.inputs['X']))}



    def init_dtype_type(self):

        self.dtype = np.float64



    def test_check_output(self):

        self.check_output()



    def test_check_grad(self):

        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5)





class TestFloatTruncOp(TestTruncOp):

    def init_dtype_type(self):

        self.dtype = np.float32





class TestIntTruncOp(TestTruncOp):

    def init_dtype_type(self):

        self.dtype = np.int32





class TestTruncAPI(unittest.TestCase):

    def setUp(self):

        self.shape = [20, 20]

        self.x = np.random.random((20, 20)).astype(np.float32)

        self.place = paddle.CPUPlace()



    def test_api_static(self):

        paddle.enable_static()

        with paddle.static.program_guard(paddle.static.Program()):

            x = paddle.fluid.data('X', self.shape)

            out = paddle.trunc(x)

            exe = paddle.static.Executor(self.place)

            res = exe.run(feed={'X': self.x}, fetch_list=[out])

        out_ref = np.trunc(self.x)

        for out in res:

            self.assertEqual(np.allclose(out, out_ref, rtol=1e-08), True)



    def test_api_dygraph(self):



        paddle.disable_static(self.place)

        x_tensor = paddle.to_tensor(self.x)

        out = paddle.trunc(x_tensor)

        out_ref = np.trunc(self.x)

        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True)

        paddle.enable_static()



    def test_api_eager(self):

        paddle.disable_static(self.place)



        with _test_eager_guard():

            x_tensor = paddle.to_tensor(self.x)

            out = paddle.trunc(x_tensor)

        out_ref = np.trunc(self.x)

        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True)

        paddle.enable_static()



    def test_api_eager_dygraph(self):

        with _test_eager_guard():

            self.test_api_dygraph()



    def test_errors(self):

        with paddle.static.program_guard(paddle.static.Program()):

            x = paddle.fluid.data('X', [20, 20], 'bool')

            self.assertRaises(TypeError, paddle.trunc, x)





if __name__ == "__main__":

    unittest.main()

from __future__ import print_function



import unittest

import numpy as np

from op_test import OpTest

import paddle

import paddle.fluid.core as core

import paddle.fluid as fluid

from paddle.fluid import Program, program_guard

from paddle.fluid.framework import _test_eager_guard



paddle.enable_static()





class TestTruncOp(OpTest):

    def setUp(self):

        self.op_type = "trunc"

        self.python_api = paddle.trunc

        self.dtype = np.float64

        np.random.seed(2021)

        self.inputs = {'X': np.random.random((20, 20)).astype(self.dtype)}

        self.outputs = {'Out': (np.trunc(self.inputs['X']))}



    def init_dtype_type(self):

        self.dtype = np.float64



    def test_check_output(self):

        self.check_output()



    def test_check_grad(self):

        self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5)





class TestFloatTruncOp(TestTruncOp):

    def init_dtype_type(self):

        self.dtype = np.float32





class TestIntTruncOp(TestTruncOp):

    def init_dtype_type(self):

        self.dtype = np.int32





class TestTruncAPI(unittest.TestCase):

    def setUp(self):

        self.shape = [20, 20]

        self.x = np.random.random((20, 20)).astype(np.float32)

        self.place = paddle.CPUPlace()



    def test_api_static(self):

        paddle.enable_static()

        with paddle.static.program_guard(paddle.static.Program()):

            x = paddle.fluid.data('X', self.shape)

            out = paddle.trunc(x)

            exe = paddle.static.Executor(self.place)

            res = exe.run(feed={'X': self.x}, fetch_list=[out])

        out_ref = np.trunc(self.x)

        for out in res:

            self.assertEqual(np.allclose(out, out_ref, rtol=1e-08), True)



    def test_api_dygraph(self):



        paddle.disable_static(self.place)

        x_tensor = paddle.to_tensor(self.x)

        out = paddle.trunc(x_tensor)

        out_ref = np.trunc(self.x)

        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True)

        paddle.enable_static()



    def test_api_eager(self):

        paddle.disable_static(self.place)



        with _test_eager_guard():

            x_tensor = paddle.to_tensor(self.x)

            out = paddle.trunc(x_tensor)

        out_ref = np.trunc(self.x)

        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True)

        paddle.enable_static()



    def test_api_eager_dygraph(self):

        with _test_eager_guard():

            self.test_api_dygraph()



    def test_errors(self):

        with paddle.static.program_guard(paddle.static.Program()):

            x = paddle.fluid.data('X', [20, 20], 'bool')

            self.assertRaises(TypeError, paddle.trunc, x)





if __name__ == "__main__":

    unittest.main()
  • test_where_op.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from op_test import OpTest
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.op import Operator
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import _test_eager_guard


class TestWhereOp(OpTest):
    def setUp(self):
        self.op_type = 'where'
        self.python_api = paddle.where
        self.init_config()
        self.inputs = {'Condition': self.cond, 'X': self.x, 'Y': self.y}
        self.outputs = {'Out': np.where(self.cond, self.x, self.y)}

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out', check_eager=False)

    def init_config(self):
        self.x = np.random.uniform((-3), 5, 100).astype('float64')
        self.y = np.random.uniform((-3), 5, 100).astype('float64')
        self.cond = np.zeros(100).astype('bool')


class TestWhereOp2(TestWhereOp):
    def init_config(self):
        self.x = np.random.uniform((-5), 5, (60, 2)).astype('float64')
        self.y = np.random.uniform((-5), 5, (60, 2)).astype('float64')
        self.cond = np.ones((60, 2)).astype('bool')


class TestWhereOp3(TestWhereOp):
    def init_config(self):
        self.x = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64')
        self.y = np.random.uniform((-3), 5, (20, 2, 4)).astype('float64')
        self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool)


class TestWhereAPI(unittest.TestCase):
    def setUp(self):
        self.init_data()

    def init_data(self):
        self.shape = [10, 15]
        self.cond = np.array(np.random.randint(2, size=self.shape), dtype=bool)
        self.x = np.random.uniform((-2), 3, self.shape).astype(np.float32)
        self.y = np.random.uniform((-2), 3, self.shape).astype(np.float32)
        self.out = np.where(self.cond, self.x, self.y)

    def ref_x_backward(self, dout):
        return np.where((self.cond == True), dout, 0)

    def ref_y_backward(self, dout):
        return np.where((self.cond == False), dout, 0)

    def test_api(self, use_cuda=False):
        for x_stop_gradient in [False, True]:
            for y_stop_gradient in [False, True]:
                with fluid.program_guard(Program(), Program()):
                    cond = fluid.layers.data(
                        name='cond', shape=self.shape, dtype='bool')
                    x = fluid.layers.data(
                        name='x', shape=self.shape, dtype='float32')
                    y = fluid.layers.data(
                        name='y', shape=self.shape, dtype='float32')
                    x.stop_gradient = x_stop_gradient
                    y.stop_gradient = y_stop_gradient
                    result = paddle.where(cond, x, y)
                    append_backward(layers.mean(result))
                    for use_cuda in [False, True]:
                        if (use_cuda and
                            (not fluid.core.is_compiled_with_cuda())):
                            break
                        place = (fluid.CUDAPlace(0)
                                 if use_cuda else fluid.CPUPlace())
                        exe = fluid.Executor(place)
                        fetch_list = [result, result.grad_name]
                        if (x_stop_gradient is False):
                            fetch_list.append(x.grad_name)
                        if (y_stop_gradient is False):
                            fetch_list.append(y.grad_name)
                        out = exe.run(
                            fluid.default_main_program(),
                            feed={'cond': self.cond,
                                  'x': self.x,
                                  'y': self.y},
                            fetch_list=fetch_list)
                        assert np.array_equal(out[0], self.out)
                        if (x_stop_gradient is False):
                            assert np.array_equal(out[2],
                                                  self.ref_x_backward(out[1]))
                            if (y.stop_gradient is False):
                                assert np.array_equal(
                                    out[3], self.ref_y_backward(out[1]))
                        elif (y.stop_gradient is False):
                            assert np.array_equal(out[2],
                                                  self.ref_y_backward(out[1]))

    def test_api_broadcast(self, use_cuda=False):
        main_program = Program()
        with fluid.program_guard(main_program):
            x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32')
            y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32')
            x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype('float32')
            y_i = np.array(
                [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype('float32')
            result = paddle.where((x > 1), x=x, y=y)
            for use_cuda in [False, True]:
                if (use_cuda and (not fluid.core.is_compiled_with_cuda())):
                    return
                place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace())
                exe = fluid.Executor(place)
                out = exe.run(fluid.default_main_program(),
                              feed={'x': x_i,
                                    'y': y_i},
                              fetch_list=[result])
                assert np.array_equal(out[0], np.where((x_i > 1), x_i, y_i))

    def test_scalar(self):
        paddle.enable_static()
        main_program = Program()
        with fluid.program_guard(main_program):
            cond_shape = [2, 4]
            cond = fluid.layers.data(
                name='cond', shape=cond_shape, dtype='bool')
            x_data = 1.0
            y_data = 2.0
            cond_data = np.array([False, False, True, True]).astype('bool')
            result = paddle.where(condition=cond, x=x_data, y=y_data)
            for use_cuda in [False, True]:
                if (use_cuda and (not fluid.core.is_compiled_with_cuda())):
                    return
                place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace())
                exe = fluid.Executor(place)
                out = exe.run(fluid.default_main_program(),
                              feed={'cond': cond_data},
                              fetch_list=[result])
                expect = np.where(cond_data, x_data, y_data)
                assert np.array_equal(out[0], expect)

    def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape):
        paddle.enable_static()
        main_program = Program()
        with fluid.program_guard(main_program):
            cond = fluid.layers.data(
                name='cond', shape=cond_shape, dtype='bool')
            x = fluid.layers.data(name='x', shape=x_shape, dtype='float32')
            y = fluid.layers.data(name='y', shape=y_shape, dtype='float32')
            cond_data_tmp = np.random.random(size=cond_shape).astype('float32')
            cond_data = (cond_data_tmp < 0.3)
            x_data = np.random.random(size=x_shape).astype('float32')
            y_data = np.random.random(size=y_shape).astype('float32')
            result = paddle.where(condition=cond, x=x, y=y)
            for use_cuda in [False, True]:
                if (use_cuda and (not fluid.core.is_compiled_with_cuda())):
                    return
                place = (fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace())
                exe = fluid.Executor(place)
                out = exe.run(
                    fluid.default_main_program(),
                    feed={'cond': cond_data,
                          'x': x_data,
                          'y': y_data},
                    fetch_list=[result])
                expect = np.where(cond_data, x_data, y_data)
                assert np.array_equal(out[0], expect)

    def test_static_api_broadcast_1(self):
        cond_shape = [2, 4]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)

    def test_static_api_broadcast_2(self):
        cond_shape = [2, 1]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)

    def test_static_api_broadcast_3(self):
        cond_shape = [2, 2, 1]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)

    def test_static_api_broadcast_4(self):
        cond_shape = [2, 1, 4]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)

    def test_static_api_broadcast_5(self):
        cond_shape = [3, 2, 2, 4]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)

    def test_static_api_broadcast_6(self):
        cond_shape = [2, 2, 4]
        a_shape = [2, 2, 1]
        b_shape = [2, 2, 1]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)

    def test_static_api_broadcast_7(self):
        cond_shape = [2, 2, 4]
        a_shape = [2, 1, 4]
        b_shape = [2, 1, 4]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)

    def test_static_api_broadcast_8(self):
        cond_shape = [3, 2, 2, 4]
        a_shape = [2, 2, 1]
        b_shape = [2, 2, 1]
        self.__test_where_with_broadcast_static(cond_shape, a_shape, b_shape)


class TestWhereDygraphAPI(unittest.TestCase):
    def test_api(self):
        with fluid.dygraph.guard():
            x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64')
            y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64')
            cond_i = np.array([False, False, True, True]).astype('bool')
            x = fluid.dygraph.to_variable(x_i)
            y = fluid.dygraph.to_variable(y_i)
            cond = fluid.dygraph.to_variable(cond_i)
            out = paddle.where(cond, x, y)
            assert np.array_equal(out.cpu().numpy(), np.where(cond_i, x_i, y_i))

    def test_scalar(self):
        with fluid.dygraph.guard():
            cond_i = np.array([False, False, True, True]).astype('bool')
            x = 1.0
            y = 2.0
            cond = fluid.dygraph.to_variable(cond_i)
            out = paddle.where(cond, x, y)
            assert np.array_equal(out.numpy(), np.where(cond_i, x, y))

    def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape):
        with fluid.dygraph.guard():
            cond_tmp = paddle.rand(cond_shape)
            cond = (cond_tmp < 0.3)
            a = paddle.rand(a_shape)
            b = paddle.rand(b_shape)
            result = paddle.where(cond, a, b)
            result = result.cpu().numpy()
            expect = np.where(cond.cpu(), a.cpu(), b.cpu())
            self.assertTrue(np.array_equal(expect, result))

    def test_dygraph_api_broadcast_1(self):
        cond_shape = [2, 4]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_dygraph_api_broadcast_2(self):
        cond_shape = [2, 1]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_dygraph_api_broadcast_3(self):
        cond_shape = [2, 2, 1]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_dygraph_api_broadcast_4(self):
        cond_shape = [2, 1, 4]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_dygraph_api_broadcast_5(self):
        cond_shape = [3, 2, 2, 4]
        a_shape = [2, 2, 4]
        b_shape = [2, 2, 4]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_dygraph_api_broadcast_6(self):
        cond_shape = [2, 2, 4]
        a_shape = [2, 2, 1]
        b_shape = [2, 2, 1]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_dygraph_api_broadcast_7(self):
        cond_shape = [2, 2, 4]
        a_shape = [2, 1, 4]
        b_shape = [2, 1, 4]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_dygraph_api_broadcast_8(self):
        cond_shape = [3, 2, 2, 4]
        a_shape = [2, 2, 1]
        b_shape = [2, 2, 1]
        self.__test_where_with_broadcast_dygraph(cond_shape, a_shape, b_shape)

    def test_where_condition(self):
        data = np.array([[True, False], [False, True]])
        with program_guard(Program(), Program()):
            x = fluid.layers.data(name='x', shape=[(-1), 2])
            y = paddle.where(x)
            self.assertEqual(type(y), tuple)
            self.assertEqual(len(y), 2)
            z = fluid.layers.concat(list(y), axis=1)
            exe = fluid.Executor(fluid.CPUPlace())
            (res, ) = exe.run(feed={'x': data},
                              fetch_list=[z.name],
                              return_numpy=False)
        expect_out = np.array([[0, 0], [1, 1]])
        self.assertTrue(np.allclose(expect_out, np.array(res)))
        data = np.array([True, True, False])
        with program_guard(Program(), Program()):
            x = fluid.layers.data(name='x', shape=[(-1)])
            y = paddle.where(x)
            self.assertEqual(type(y), tuple)
            self.assertEqual(len(y), 1)
            z = fluid.layers.concat(list(y), axis=1)
            exe = fluid.Executor(fluid.CPUPlace())
            (res, ) = exe.run(feed={'x': data},
                              fetch_list=[z.name],
                              return_numpy=False)
        expect_out = np.array([[0], [1]])
        self.assertTrue(np.allclose(expect_out, np.array(res)))

    def test_eager(self):
        with _test_eager_guard():
            self.test_api()
            self.test_dygraph_api_broadcast_1()
            self.test_dygraph_api_broadcast_2()
            self.test_dygraph_api_broadcast_3()
            self.test_dygraph_api_broadcast_4()
            self.test_dygraph_api_broadcast_5()
            self.test_dygraph_api_broadcast_6()
            self.test_dygraph_api_broadcast_7()
            self.test_dygraph_api_broadcast_8()


class TestWhereOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64')
            y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64')
            cond_i = np.array([False, False, True, True]).astype('bool')

            def test_Variable():
                paddle.where(cond_i, x_i, y_i)

            self.assertRaises(TypeError, test_Variable)

            def test_type():
                x = fluid.layers.data(name='x', shape=[4], dtype='bool')
                y = fluid.layers.data(name='y', shape=[4], dtype='float16')
                cond = fluid.layers.data(name='cond', shape=[4], dtype='int32')
                paddle.where(cond, x, y)

            self.assertRaises(TypeError, test_type)

    def test_value_error(self):
        with fluid.dygraph.guard():
            cond_shape = [2, 2, 4]
            cond_tmp = paddle.rand(cond_shape)
            cond = (cond_tmp < 0.3)
            a = paddle.rand(cond_shape)
            self.assertRaises(ValueError, paddle.where, cond, a)

    def test_eager(self):
        with _test_eager_guard():
            self.test_value_error()


if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
  • test_yolo_box_op.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division
import unittest
import numpy as np
from op_test import OpTest
import paddle
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard


def sigmoid(x):
    return (1.0 / (1.0 + np.exp(((-1.0) * x))))


def YoloBox(x, img_size, attrs):
    (n, c, h, w) = x.shape
    anchors = attrs['anchors']
    an_num = int((len(anchors) // 2))
    class_num = attrs['class_num']
    conf_thresh = attrs['conf_thresh']
    downsample = attrs['downsample']
    clip_bbox = attrs['clip_bbox']
    scale_x_y = attrs['scale_x_y']
    iou_aware = attrs['iou_aware']
    iou_aware_factor = attrs['iou_aware_factor']
    bias_x_y = ((-0.5) * (scale_x_y - 1.0))
    input_h = (downsample * h)
    input_w = (downsample * w)
    if iou_aware:
        ioup = x[:, :an_num, :, :]
        ioup = np.expand_dims(ioup, axis=(-1))
        x = x[:, an_num:, :, :]
    x = x.reshape((n, an_num, (5 + class_num), h, w)).transpose((0, 1, 3, 4, 2))
    pred_box = x[:, :, :, :, :4].copy()
    grid_x = np.tile(np.arange(w).reshape((1, w)), (h, 1))
    grid_y = np.tile(np.arange(h).reshape((h, 1)), (1, w))
    pred_box[:, :, :, :, 0] = ((
        (grid_x + (sigmoid(pred_box[:, :, :, :, 0]) * scale_x_y)) + bias_x_y) /
                               w)
    pred_box[:, :, :, :, 1] = ((
        (grid_y + (sigmoid(pred_box[:, :, :, :, 1]) * scale_x_y)) + bias_x_y) /
                               h)
    anchors = [(anchors[i], anchors[(i + 1)])
               for i in range(0, len(anchors), 2)]
    anchors_s = np.array(
        [((an_w / input_w), (an_h / input_h)) for (an_w, an_h) in anchors])
    anchor_w = anchors_s[:, 0:1].reshape((1, an_num, 1, 1))
    anchor_h = anchors_s[:, 1:2].reshape((1, an_num, 1, 1))
    pred_box[:, :, :, :, 2] = (np.exp(pred_box[:, :, :, :, 2]) * anchor_w)
    pred_box[:, :, :, :, 3] = (np.exp(pred_box[:, :, :, :, 3]) * anchor_h)
    if iou_aware:
        pred_conf = ((sigmoid(x[:, :, :, :, 4:5])**(1 - iou_aware_factor)) *
                     (sigmoid(ioup)**iou_aware_factor))
    else:
        pred_conf = sigmoid(x[:, :, :, :, 4:5])
    pred_conf[(pred_conf < conf_thresh)] = 0.0
    pred_score = (sigmoid(x[:, :, :, :, 5:]) * pred_conf)
    pred_box = (pred_box * (pred_conf > 0.0).astype('float32'))
    pred_box = pred_box.reshape((n, (-1), 4))
    (pred_box[:, :, :2], pred_box[:, :, 2:4]) = (
        (pred_box[:, :, :2] - (pred_box[:, :, 2:4] / 2.0)),
        (pred_box[:, :, :2] + (pred_box[:, :, 2:4] / 2.0)))
    pred_box[:, :, 0] = (pred_box[:, :, 0] * img_size[:, 1][:, np.newaxis])
    pred_box[:, :, 1] = (pred_box[:, :, 1] * img_size[:, 0][:, np.newaxis])
    pred_box[:, :, 2] = (pred_box[:, :, 2] * img_size[:, 1][:, np.newaxis])
    pred_box[:, :, 3] = (pred_box[:, :, 3] * img_size[:, 0][:, np.newaxis])
    if clip_bbox:
        for i in range(len(pred_box)):
            pred_box[i, :, 0] = np.clip(pred_box[i, :, 0], 0, np.inf)
            pred_box[i, :, 1] = np.clip(pred_box[i, :, 1], 0, np.inf)
            pred_box[i, :, 2] = np.clip(pred_box[i, :, 2], (-np.inf),
                                        (img_size[(i, 1)] - 1))
            pred_box[i, :, 3] = np.clip(pred_box[i, :, 3], (-np.inf),
                                        (img_size[(i, 0)] - 1))
    return (pred_box, pred_score.reshape((n, (-1), class_num)))


class TestYoloBoxOp(OpTest):
    """Compare the ``yolo_box`` operator against the NumPy reference ``YoloBox``."""

    def setUp(self):
        self.initTestCase()
        self.op_type = 'yolo_box'
        feature_map = np.random.random(self.x_shape).astype('float32')
        image_sizes = np.random.randint(10, 20,
                                        self.imgsize_shape).astype('int32')
        self.attrs = dict(
            anchors=self.anchors,
            class_num=self.class_num,
            conf_thresh=self.conf_thresh,
            downsample=self.downsample,
            clip_bbox=self.clip_bbox,
            scale_x_y=self.scale_x_y,
            iou_aware=self.iou_aware,
            iou_aware_factor=self.iou_aware_factor, )
        self.inputs = {'X': feature_map, 'ImgSize': image_sizes}
        boxes, scores = YoloBox(feature_map, image_sizes, self.attrs)
        self.outputs = {'Boxes': boxes, 'Scores': scores}

    def test_check_output(self):
        # Eager-mode checking is intentionally disabled for this op.
        self.check_output(check_eager=False)

    def initTestCase(self):
        # Base configuration: 3 anchors, clipping enabled, square 13x13 grid.
        self.anchors = [10, 13, 16, 30, 33, 23]
        an_num = len(self.anchors) // 2
        self.batch_size = 32
        self.class_num = 2
        self.conf_thresh = 0.5
        self.downsample = 32
        self.clip_bbox = True
        self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 13)
        self.imgsize_shape = (self.batch_size, 2)
        self.scale_x_y = 1.0
        self.iou_aware = False
        self.iou_aware_factor = 0.5


class TestYoloBoxOpNoClipBbox(TestYoloBoxOp):
    """Same as the base case but with bounding-box clipping disabled."""

    def initTestCase(self):
        # Reuse the base configuration instead of duplicating every field;
        # only clip_bbox differs from TestYoloBoxOp.
        super(TestYoloBoxOpNoClipBbox, self).initTestCase()
        self.clip_bbox = False


class TestYoloBoxOpScaleXY(TestYoloBoxOp):
    """Same as the base case but with a non-default scale_x_y factor."""

    def initTestCase(self):
        # Reuse the base configuration instead of duplicating every field;
        # only scale_x_y differs from TestYoloBoxOp.
        super(TestYoloBoxOpScaleXY, self).initTestCase()
        self.scale_x_y = 1.2


class TestYoloBoxOpIoUAware(TestYoloBoxOp):
    """Same as the base case but with IoU-aware confidence enabled."""

    def initTestCase(self):
        # Reuse the base configuration; iou_aware adds one extra channel
        # per anchor, so the input channel count becomes an * (6 + classes).
        super(TestYoloBoxOpIoUAware, self).initTestCase()
        self.iou_aware = True
        an_num = len(self.anchors) // 2
        self.x_shape = (self.batch_size, an_num * (6 + self.class_num), 13, 13)


class TestYoloBoxDygraph(unittest.TestCase):
    """Smoke-test ``paddle.vision.ops.yolo_box`` in dynamic-graph mode."""

    def test_dygraph(self):
        paddle.disable_static()
        img_size = np.ones((2, 2)).astype('int32')
        img_size = paddle.to_tensor(img_size)
        # 14 channels = 2 anchors * (5 + 2 classes).
        x1 = np.random.random([2, 14, 8, 8]).astype('float32')
        x1 = paddle.to_tensor(x1)
        (boxes, scores) = paddle.vision.ops.yolo_box(
            x1,
            img_size=img_size,
            anchors=[10, 13, 16, 30],
            class_num=2,
            conf_thresh=0.01,
            downsample_ratio=8,
            clip_bbox=True,
            scale_x_y=1.0)
        assert ((boxes is not None) and (scores is not None))
        # 16 channels = 2 anchors * (6 + 2 classes) when iou_aware is on.
        x2 = np.random.random([2, 16, 8, 8]).astype('float32')
        x2 = paddle.to_tensor(x2)
        (boxes, scores) = paddle.vision.ops.yolo_box(
            x2,
            img_size=img_size,
            anchors=[10, 13, 16, 30],
            class_num=2,
            conf_thresh=0.01,
            downsample_ratio=8,
            clip_bbox=True,
            scale_x_y=1.0,
            iou_aware=True,
            iou_aware_factor=0.5)
        # Fix: the second call's outputs were previously never checked,
        # unlike the first call and the static-graph counterpart test.
        assert ((boxes is not None) and (scores is not None))
        paddle.enable_static()

    def test_eager(self):
        with _test_eager_guard():
            self.test_dygraph()


class TestYoloBoxStatic(unittest.TestCase):
    """Smoke-test ``paddle.vision.ops.yolo_box`` in static-graph mode."""

    def test_static(self):
        x1 = paddle.static.data('x1', [2, 14, 8, 8], 'float32')
        img_size = paddle.static.data('img_size', [2, 2], 'int32')
        # Plain (non IoU-aware) variant: 14 = 2 anchors * (5 + 2 classes).
        boxes, scores = paddle.vision.ops.yolo_box(
            x1,
            img_size=img_size,
            anchors=[10, 13, 16, 30],
            class_num=2,
            conf_thresh=0.01,
            downsample_ratio=8,
            clip_bbox=True,
            scale_x_y=1.0)
        assert boxes is not None and scores is not None
        # IoU-aware variant: 16 = 2 anchors * (6 + 2 classes).
        x2 = paddle.static.data('x2', [2, 16, 8, 8], 'float32')
        boxes, scores = paddle.vision.ops.yolo_box(
            x2,
            img_size=img_size,
            anchors=[10, 13, 16, 30],
            class_num=2,
            conf_thresh=0.01,
            downsample_ratio=8,
            clip_bbox=True,
            scale_x_y=1.0,
            iou_aware=True,
            iou_aware_factor=0.5)
        assert boxes is not None and scores is not None


class TestYoloBoxOpHW(TestYoloBoxOp):
    """Same as the base case but with a non-square (13x9) feature map."""

    def initTestCase(self):
        # Reuse the base configuration; only clip_bbox and the grid width
        # differ from TestYoloBoxOp.
        super(TestYoloBoxOpHW, self).initTestCase()
        self.clip_bbox = False
        an_num = len(self.anchors) // 2
        self.x_shape = (self.batch_size, an_num * (5 + self.class_num), 13, 9)


if __name__ == '__main__':
    # Run the op tests under static-graph mode.
    paddle.enable_static()
    unittest.main()
  • test_zeros_like_op.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import zeros_like
from paddle.fluid import core, Program, program_guard
from paddle.fluid.framework import _test_eager_guard


class TestZerosLikeAPIError(unittest.TestCase):
    """``zeros_like`` must reject unsupported dtypes such as int8."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            var = paddle.fluid.data('x', [3, 4])
            self.assertRaises(TypeError, zeros_like, var, 'int8')

    def test_eager(self):
        with _test_eager_guard():
            self.test_errors()


class TestZerosLikeAPI(unittest.TestCase):
    """Check zeros_like output dtypes and values in static-graph mode."""

    def test_api(self):
        shape = [3, 4]
        startup_program = Program()
        train_program = Program()
        with program_guard(train_program, startup_program):
            x = paddle.fluid.data('X', shape)
            out1 = zeros_like(x)
            # Fix: ``np.bool`` was an alias of the builtin ``bool`` and was
            # removed in NumPy 1.24 — use ``bool`` directly.
            out2 = zeros_like(x, bool)
            out3 = zeros_like(x, 'float64')
            out4 = zeros_like(x, 'int32')
            out5 = zeros_like(x, 'int64')
        place = (fluid.CUDAPlace(0)
                 if core.is_compiled_with_cuda() else fluid.CPUPlace())
        exe = fluid.Executor(place)
        outs = exe.run(train_program,
                       feed={'X': np.ones(shape).astype('float32')},
                       fetch_list=[out1, out2, out3, out4, out5])
        for (i, dtype) in enumerate(
            [np.float32, bool, np.float64, np.int32, np.int64]):
            self.assertEqual(outs[i].dtype, dtype)
            self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True)

    def test_eager(self):
        with _test_eager_guard():
            self.test_api()


class TestZerosLikeImpeartive(unittest.TestCase):
    """Check zeros_like in imperative (dygraph) mode across dtypes."""

    def _check_all_zero(self, out, shape, dtype):
        # On CUDA devices the tensor must be copied to CPU before it can
        # be compared against a NumPy array.
        if core.is_compiled_with_cuda():
            arr = out.cpu().numpy()
        else:
            arr = out.numpy()
        self.assertEqual((arr == np.zeros(shape, dtype)).all(), True)

    def test_out(self):
        shape = [3, 4]
        place = (fluid.CUDAPlace(0)
                 if core.is_compiled_with_cuda() else fluid.CPUPlace())
        paddle.disable_static(place)
        x = paddle.to_tensor(np.ones(shape))
        # Fix: ``np.bool`` was an alias of the builtin ``bool`` and was
        # removed in NumPy 1.24 — use ``bool`` directly.
        for dtype in [bool, np.float32, np.float64, np.int32, np.int64]:
            out = zeros_like(x, dtype)
            self._check_all_zero(out, shape, dtype)
        # The aliases below must behave like the top-level ``zeros_like``.
        # NOTE: ``dtype`` keeps its last loop value (np.int64) here, as in
        # the original test; the comparison still holds since 0 == 0.0.
        out = paddle.tensor.zeros_like(x)
        self._check_all_zero(out, shape, dtype)
        out = paddle.tensor.creation.zeros_like(x)
        self._check_all_zero(out, shape, dtype)
        paddle.enable_static()

    def test_eager(self):
        with _test_eager_guard():
            self.test_out()


# Run all zeros_like unit tests when executed as a script.
if (__name__ == '__main__'):
    unittest.main()

zhwesky2010 and others added 14 commits April 7, 2022 15:55
cherry-pick

fix compile bug of windows cuda11.5 #41433
【cherry-pick #41430】fix bug of random compile failure, due to incorrect compile order of dependencies
[Cherry-Pick]Fix eager try catch (#41438)
* add one_hot gpu hint

* move allow_out_of_range judgement

* delete useless unittest
* Use `self`as a parameter of _hash_with_id function to avoid error caused by hash_id reuse (#41200)

* Add fill_constant_batch_size YAML and UT (#41474)

* Switch some dy2st UT to eager mode (#41382)

* Switch some dy2st UT to eager mode

* Fix test_lstm and remove test_transformer

* Run test_resnet_v2 in old dy mode
…h Getting tensor place impl (#41539)

* [Phi] Polish truncated normal kernel and add yaml (#41280)

* polish truncated normal kernel

* add yaml

* add truncated normal kernel and add yaml

* polish unittests and yaml

* import dygraph mehtod

* add unique yaml and final state api (#41460)

* fix get tensor backend set bug (#41478)

* [Phi] Add unbind yaml and final state api (#41277)

* add unbind yaml

* fix unittest

* [Phi] Add swish yaml and final state api (#41479)

* add swish yaml and final state api

* skip mkldnn test

* fix grad mkldnn test

* add cherry-pick lost code
* [Dygraph] Remove unrequired UT cases of DP in eager mode (#41413)

* remove unrequired ut cases

* update

* fix bugs

* update

* update
…multi version (#41503)

* change inference demo_test build method to ninja to choose visual studio version automatically

* notest;test=windows_ci_inference

* set cuda of demo_ci by arg,fix bug of ninja compile,test=document_fix;test=windows_ci;test=windows_ci_inference

* fix bug;test=document_fix;test=windows_ci;test=windows_ci_inference

* fix bug;test=document_fix;test=windows_ci_inference"

* set lib_path according to generator
…eadpool (#41567) (#41575)

* fix bug that no thread is waked up when adding task to threadpool

* fix typo
@paddle-bot-old paddle-bot-old bot added contributor External developers status: proposed labels Apr 11, 2022
@paddle-bot-old
Copy link

❌ The PR is not created using PR's template. You can refer to this Demo.
Please use PR's template, it helps save our maintainers' time so that more developers get helped.

@paddle-bot-old
Copy link

你的PR提交成功,感谢你对开源项目的贡献!
请关注后续CI自动化测试结果,详情请参考Paddle-CI手册
Your PR has been submitted. Thanks for your contribution!
Please wait for the result of CI firstly. See Paddle CI Manual for details.

Liu-xiandong and others added 10 commits April 11, 2022 13:41
…#41595)

fix bug when TruncatedNormal cannot fall back in cpu
* [Eager]Fix segment_pool/allclose/isclose/scale API bug (#41506)

* [Eager]Fix segment_pool/allclose/isclose/scale API bug

* fix kernel register problem

* add norm, segment_pool (#41465)

Co-authored-by: hong <[email protected]>
* update name

* update name

* fix test

* fix fleet bind

* update name

* update name

* fix test

* fix gpups wrapper

* remove Push/Pull/Load/Save with context in client and wrapper base class

* fix

* fix

* remove some interface

* fix

* remove

* code style

* recover

* fix

* remove code unused

* remove some unused table & accessor & CommonDenseTable => MemoryDenseTable

* fix

* fix

* fix

* recover

* remove unused code

* recover unittest

* fix

* remove

* fix

* remove code unuseful

* remove

* fix

* recover

* remove

Co-authored-by: esythan <[email protected]>

Co-authored-by: esythan <[email protected]>
#41618)

* full api fix

* when out is None, go old dygraph mode

* fix

* add name for buffer

* fix by code review

* fix

* by static check
MingMingShangTian and others added 14 commits April 14, 2022 11:16
* [Yaml]add exp yaml (#41217)

* add exp yaml

* add exp api in test case

* add determinant yaml

* fix exp op unittest

* change test class name

* modify api name

* compacted with raw api

* fix det api

* add python_api

* add test eager for determinant op

* [Yaml] Add assign yaml (#41428)

* add assign yaml

* add assign api

* add assign backward api

* add assign

* add assign yaml

* add assign

* assign yaml

* add assign raw kernel and use assign_raw in yaml

* merge develop branch

* add missing python_api

* exchange assign and assign_raw kernel name (#41625)

* exchange assign and assign_raw kernel name

* fix register error

* [Yaml]add gaussian_random yaml and test case (#41312)

* add guassian random yaml

* add gaussian_random yaml and test case

* fix error modify of full yaml

* import in_dygraph_mode

* import _in_legacy_dygraph

* add place arg in api

* import __current_expected_place

* fix test_egr_python_api failed case

* add test case

* add cast for NormalInitializer

* fix test error

* fix test error

* rm unsed check code

* fix test error in test_initializer_nn

* modify by review

* [Phi]fix split error when sections has 0 size and add test case (#41708)

* fix split error when sections has 0 size and add test case

* fix test case
* add context pool unittests

* fix timeout

* polish details

* change option pos

* add dll decl for wndows

* fix pre-commit error

* move dll_decl and export DeviceContext

* replace lost dll_decl.h
* add new method for custom double grad

* add tanh double grad unittest

* change year

* revert tensor init method
* add multi_dot,maxout,multiplex yaml

* add code converage
cherry-pick PR#41276 到 release/2.3

    添加路径:paddle.sparse,paddle.sparse.functional,paddle.sparse.layer
    添加API:paddle.sparse.sparse_coo_tensor(用于创建COO格式的Sparse Tensor )和 paddle.sparse.sparse_csr_tensor(用于创建CSR格式的Sparse Tensor )
    添加API:paddle.sparse.ReLU
…) (#41834)

Add paddle.sparse and three Sparse API (#41276)
Add Sparse API to_dense, to_sparse_coo and values (#41394)
* the one ps proto (#41659)

* the one ps proto

* the one ps proto

* fix

* fix

* fix

* fix windows ci

* fix windows ci

* add dependency

* add dependency

* fix bug of ps_py_proto cant find path for the folder not created (#41793)

Co-authored-by: Sing_chan <[email protected]>
* fix data transform problem for cudnn backend (#41622)

* Fix problem of infermeta with vector output (#41646)

* remove stack_grad infershape

* fix bug of output with null

* fix bug
@TCChenlong
Copy link
Contributor

这个PR提的还是有问题的,不应该是从release/2.3 合并到 develop,可以再看一下文档的说明:https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/git_guides/local_dev_guide_cn.html
image

@ImNoBadBoy
Copy link
Author

ImNoBadBoy commented Apr 18, 2022 via email

* [Phi&CustomOp] Remove deprecated enum PlaceType for custom op & add warning (#41647)

* remove old custom op placetype

* replace dist  placetype using

* add with gpu macro

* fix mutable_data error

* fix set value error

* add comment

* remove all is initialized using (#41766)

* remove inner_place using (#41768)

* polish tensor depreacted method warning (#41807)

* [CustomOp] Fix PlaceType related compat error (#41826)

* fix place type related compat error

* fix test failed

* remove dll decl

* revert place type change

* add dll decl

* resolve conflict
@ImNoBadBoy
Copy link
Author

ImNoBadBoy commented Apr 18, 2022 via email

joey12300 and others added 10 commits April 18, 2022 10:51
* Add core.eager.StringTensor __init__ which pyarray args can be passed

* Add the numpy method of core.eager.StringTensor

* revert tensor.to_string modification

* Add ToPyObject for core.eager.StringTensor

* Add debug string for core.eager.StringTensor

* Remove place args of core.eager.StringTensor temporarily

* Fix check string_tensor error

* remove dtype of core.eager.StringTensor

* add core.eager.StringTensor unittest

* remove pstring from VarDesc

* Add InitStringTensorWithStringTensor

* Remove to_string modification

* Remove zero_copy arg from StringTensor creator
This PR is the cherry-pick of #41824

This PR fixes a bug that will cause the Cuda address error. The reason for this bug is that the grid number of the Cuda Kernel had been wrongly set.
…perative_double_grad (#41451) (#41893)

* [DoubleGrad] Enabled double grad test cases in eager_mode for test_imperative_double_grad

* Fixed elementwise issue

* Addressed CI failures
* split reduce_kernel

* rm reduce_kernel in cmake

* split reduce_grad kernels

* fix cmake build error

* format code

* fix standalone_executor_test error
* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun
* [XPUPS]add support for kunlun2 (#40985)


[XPUPS]add support for kunlun2

Co-authored-by: WorgenZhang <[email protected]>

* [XPUPS]fix hashtable_kernel.kps (#41790)

* refactor heter comm kernel

* update. test=develop

* update calc_shard_offset. test=develop

* update xpu kernel. test=develop

* update args of calc_shard_offset

* update. test=develop

* remove customGradMerger

* update. test=develop

* update. test=develop

* fix. test=develop

* update. test=develop

* update. test=develop

* update optimizer kernel

* update. test=develop

* update. test=develop

* update. test=develop

* update. test=develop

* update. test=develop

* update. test=develop

* update. test=develop

* update. test=develop

* fix. test=develop

* fix. test=develop

* add optimizer kernel. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix kunlun not support size_t. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* update hashtable. test=develop

* update. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* update. test=develop

* update. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* template init. test=develop

* hashtable template init. test=develop

* fix. test=develop

* fix. test=devlop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix hashtable_kernel. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

Co-authored-by: WorgenZhang <[email protected]>

* [XPUPS]modify xpu_kp.cmake with HETERPS&PSLIB (#41760)

* modify xpu_kp.cmake with HETERPS&PSLIB

* fix. test=develop

* fix. test=develop

* fix. test=develop

* fix. test=develop

Co-authored-by: WorgenZhang <[email protected]>
* fix moe apis (#41650)

* Moe ref (#41836)

* moe ref

* ref commit

* update; document_fix

* update;document_fix

* Moe ref (#41864)

* moe ref

* ref commit; document_fix

* update; document_fix

* update document_fix

* update; document_fix
* [Eager] add _fallback_legacy_dygraph for npu/xpu/rocm

* fix import
@TCChenlong TCChenlong closed this Apr 18, 2022
@ImNoBadBoy
Copy link
Author

ImNoBadBoy commented Oct 11, 2022 via email

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
contributor External developers
Projects
None yet
Development

Successfully merging this pull request may close these issues.