From bf86fe437a3101968f9a3469814e5380df464894 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 20 Aug 2020 17:36:43 +0800 Subject: [PATCH 01/28] refine manual seed --- paddle/fluid/framework/generator.cc | 10 ++- paddle/fluid/framework/generator.h | 38 +++++----- paddle/fluid/operators/uniform_random_op.cc | 4 +- paddle/fluid/pybind/generator_py.cc | 24 ++++--- python/paddle/fluid/generator.py | 30 +------- .../fluid/tests/unittests/test_random_seed.py | 69 ++++++++++++++++++- python/paddle/framework/random.py | 23 ++++--- 7 files changed, 127 insertions(+), 71 deletions(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index d00e38784c2c0..4fc0f223a759f 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -12,18 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/framework/generator.h" + #include #include #include #include #include -#include "paddle/fluid/framework/generator.h" - namespace paddle { namespace framework { -std::shared_ptr Generator::gen_instance_ = NULL; +const std::shared_ptr& DefaultCPUGenerator() { + static auto default_cpu_generator = + std::make_shared(GetRandomSeed()); + return default_cpu_generator; +} GeneratorState* Generator::GetState() { std::lock_guard lock(this->mutex); diff --git a/paddle/fluid/framework/generator.h b/paddle/fluid/framework/generator.h index 17870782ba72a..848fe98a253e5 100644 --- a/paddle/fluid/framework/generator.h +++ b/paddle/fluid/framework/generator.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include + #include #include #include // temp for debug @@ -27,6 +28,12 @@ limitations under the License. 
*/ namespace paddle { namespace framework { +static uint64_t GetRandomSeed() { + std::random_device rd; + // double has 53 bit significant, so limit uint64 to 53 bits + return ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF; +} + struct GeneratorState { int64_t device = -1; uint64_t current_seed = 34342423252; @@ -37,8 +44,16 @@ struct Generator { Generator() { GeneratorState default_gen_state_cpu; default_gen_state_cpu.device = -1; - default_gen_state_cpu.current_seed = 34342423252; - std::seed_seq seq({34342423252}); + default_gen_state_cpu.current_seed = GetRandomSeed(); + std::seed_seq seq({default_gen_state_cpu.current_seed}); + default_gen_state_cpu.cpu_engine = std::mt19937_64(seq); + this->state_ = std::make_shared(default_gen_state_cpu); + } + explicit Generator(uint64_t seed) { + GeneratorState default_gen_state_cpu; + default_gen_state_cpu.device = -1; + default_gen_state_cpu.current_seed = seed; + std::seed_seq seq({seed}); default_gen_state_cpu.cpu_engine = std::mt19937_64(seq); this->state_ = std::make_shared(default_gen_state_cpu); } @@ -67,24 +82,7 @@ struct Generator { bool is_init_py = false; - // CPU Generator singleton - static std::shared_ptr GetInstance() { - if (NULL == gen_instance_) { - gen_instance_.reset(new paddle::framework::Generator()); - } - return gen_instance_; - } - - static std::shared_ptr GetInstanceX() { - if (NULL == gen_instance_) { - gen_instance_.reset(new paddle::framework::Generator()); - } - gen_instance_->is_init_py = true; - return gen_instance_; - } - private: - static std::shared_ptr gen_instance_; std::shared_ptr state_; mutable std::mutex mutex; @@ -92,5 +90,7 @@ struct Generator { : state_(std::make_shared(*(other.state_))) {} }; +const std::shared_ptr& DefaultCPUGenerator(); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index a4487cde27799..32dc29e46be55 100644 --- 
a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/uniform_random_op.h" + #include + #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" @@ -62,7 +64,7 @@ class CPUUniformRandomKernel : public framework::OpKernel { std::uniform_real_distribution dist( static_cast(ctx.Attr("min")), static_cast(ctx.Attr("max"))); - auto gen_ptr = framework::Generator::GetInstance(); + auto gen_ptr = framework::DefaultCPUGenerator(); if (gen_ptr->is_init_py) { std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine(); // auto gen_engine = gen_ptr_->GetCPUEngine(); diff --git a/paddle/fluid/pybind/generator_py.cc b/paddle/fluid/pybind/generator_py.cc index 3bccd5fb2dd92..f34c5b3d4ae36 100644 --- a/paddle/fluid/pybind/generator_py.cc +++ b/paddle/fluid/pybind/generator_py.cc @@ -29,23 +29,31 @@ namespace py = pybind11; namespace paddle { namespace pybind { -void BindGenerator(py::module* m) { - py::class_(*m, "GeneratorState", ""); - py::class_(*m, "mt19937_64", ""); +void BindGenerator(py::module* m_ptr) { + auto& m = *m_ptr; + py::class_(m, "GeneratorState", ""); + py::class_(m, "mt19937_64", ""); py::class_>( - *m, "Generator") - .def(py::init([]() { return framework::Generator::GetInstanceX(); }), - py::return_value_policy::reference) + m, "Generator") + .def("__init__", + [](framework::Generator& self) { + new (&self) framework::Generator(); + }) .def("get_state", &framework::Generator::GetState, py::return_value_policy::move) .def("set_state", &framework::Generator::SetState) - .def("manual_seed", &framework::Generator::SetCurrentSeed) + .def("manual_seed", + [](std::shared_ptr& self, uint64_t seed) { + self->SetCurrentSeed(seed); + 
return self; + }) .def("seed", &framework::Generator::Seed) .def("initial_seed", &framework::Generator::GetCurrentSeed) .def("random", &framework::Generator::Random64) .def("get_cpu_engine", &framework::Generator::GetCPUEngine, py::return_value_policy::move) .def("set_cpu_engine", &framework::Generator::SetCPUEngine); + m.def("default_cpu_generator", &framework::DefaultCPUGenerator); } // end Generator } // end namespace pybind -} // end namespace paddle +} // namespace paddle diff --git a/python/paddle/fluid/generator.py b/python/paddle/fluid/generator.py index e11b2e484dce1..8bc6863831112 100644 --- a/python/paddle/fluid/generator.py +++ b/python/paddle/fluid/generator.py @@ -20,41 +20,15 @@ default_rng_seed_val = 34342423252 -class Generator(object): +class Generator(core.Generator): """Generator class""" def __init__(self, device="CPU"): """init""" self.device = device - seed_in = default_rng_seed_val if self.device == "CPU": - self.generator = core.Generator() - # self.generator.manual_seed(seed_in) + super(Generator, self).__init__() else: raise ValueError( "generator class with device %s does not exist, currently only support generator with device 'CPU' " % device) - - def get_state(self): - return self.generator.get_state() - - def set_state(self, state): - self.generator.set_state(state) - - def manual_seed(self, seed): - self.generator.manual_seed(seed) - - def seed(self): - return self.generator.seed() - - def initial_seed(self): - return self.generator.initial_seed() - - def random(self): - return self.generator.random() - - def get_cpu_engine(self): - return self.generator.get_cpu_engine() - - def set_cpu_engine(self, cpu_engine): - self.generator.set_cpu_engine(cpu_engine) diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index 2933abe46c1b8..1310ef01f04bd 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ 
b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -32,19 +32,23 @@ class TestGeneratorSeed(unittest.TestCase): def test_generator_uniform_random_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) + print(gen.initial_seed()) x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0) st1 = gen.get_state() + print(st1) x1 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) + print(gen.initial_seed()) gen.set_state(st1) + print(gen.get_state()) x2 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - gen.manual_seed(12312321111) + print(gen.initial_seed()) + paddle.manual_seed(12312321111) x3 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) x_np = x.numpy() @@ -228,6 +232,65 @@ def test_generator_randint_dygraph(self): self.assertTrue(np.allclose(x1_np, x2_np)) self.assertTrue(np.allclose(x_np, x3_np)) + def test_generator_uniform_random_static(self): + + fluid.disable_dygraph() + + gen = generator.Generator() + gen.manual_seed(123123143) + + startup_program = fluid.Program() + train_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + # example 1: + # attr shape is a list which doesn't contain tensor Variable. 
+ result_1 = fluid.layers.uniform_random(shape=[3, 4]) + result_2 = fluid.layers.uniform_random(shape=[3, 4]) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(startup_program) + out1 = exe.run(train_program, + feed={}, + fetch_list=[result_1, result_2]) + #gen.set_state(cur_state) + gen.manual_seed(123123143) + out2 = exe.run(train_program, + feed={}, + fetch_list=[result_1, result_2]) + + out1_res1 = np.array(out1[0]) + out1_res2 = np.array(out1[1]) + out2_res1 = np.array(out2[0]) + out2_res2 = np.array(out2[1]) + + if not core.is_compiled_with_cuda(): + self.assertTrue(np.allclose(out1_res1, out2_res1)) + self.assertTrue(np.allclose(out1_res2, out2_res2)) + self.assertTrue(not np.allclose(out1_res2, out1_res1)) + + def test_generator_randint_dygraph(self): + """Test Generator seed.""" + gen = generator.Generator() + + fluid.enable_dygraph() + + gen.manual_seed(12312321111) + x = paddle.randint(low=1) + st1 = gen.get_state() + x1 = paddle.randint(low=1) + gen.set_state(st1) + x2 = paddle.randint(low=1) + gen.manual_seed(12312321111) + x3 = paddle.randint(low=1) + x_np = x.numpy() + x1_np = x1.numpy() + x2_np = x2.numpy() + x3_np = x3.numpy() + if not core.is_compiled_with_cuda(): + self.assertTrue(np.allclose(x1_np, x2_np)) + self.assertTrue(np.allclose(x_np, x3_np)) + + def test_generator_ranint_static(self): fluid.disable_dygraph() diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 1bb13294805ef..5cf633aa4b36c 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -14,30 +14,35 @@ # TODO: define random api import paddle.fluid as fluid +from paddle.fluid import core __all__ = ['manual_seed'] def manual_seed(seed): """ - :alias_main: paddle.manual_seed - :alias: paddle.manual_seed,paddle.framework.random.manual_seed - Set global manual seed for program + Sets global seed for generating random numbers. Args: - manual_seed(int): random seed for program + seed(int): The random seed to set. 
It is recommend to set a large int number. Returns: - None. + Generator: a generator object. Examples: .. code-block:: python - from paddle.framework import manual_seed - manual_seed(102) + import paddle + paddle.manual_seed(102) + """ + #TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade + # 2. support gpu generator by global device + + seed = int(seed) + fluid.default_main_program().random_seed = seed fluid.default_startup_program().random_seed = seed - program = fluid.Program() - program.global_seed(seed) + + return core.default_cpu_generator().manual_seed(seed) From b6a6e5e688d19221fe034e46ba0736c8144e2458 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 20 Aug 2020 20:53:24 +0800 Subject: [PATCH 02/28] fix ci problem --- paddle/fluid/framework/generator.cc | 1 + paddle/fluid/pybind/generator_py.cc | 7 ++++- .../fluid/tests/unittests/test_random_seed.py | 26 +++++++++++++++---- python/paddle/framework/random.py | 2 ++ 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index 4fc0f223a759f..00af213a98cd6 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -26,6 +26,7 @@ namespace framework { const std::shared_ptr& DefaultCPUGenerator() { static auto default_cpu_generator = std::make_shared(GetRandomSeed()); + default_cpu_generator->is_init_py = true; return default_cpu_generator; } diff --git a/paddle/fluid/pybind/generator_py.cc b/paddle/fluid/pybind/generator_py.cc index f34c5b3d4ae36..3ad672db2ecd9 100644 --- a/paddle/fluid/pybind/generator_py.cc +++ b/paddle/fluid/pybind/generator_py.cc @@ -31,7 +31,12 @@ namespace paddle { namespace pybind { void BindGenerator(py::module* m_ptr) { auto& m = *m_ptr; - py::class_(m, "GeneratorState", ""); + py::class_>(m, "GeneratorState") + .def("current_seed", + [](std::shared_ptr& self) { + return self->current_seed; + }); py::class_(m, "mt19937_64", ""); py::class_>( m, 
"Generator") diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index 1310ef01f04bd..7160c591351bf 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -36,18 +36,18 @@ def test_generator_uniform_random_dygraph(self): fluid.enable_dygraph() gen = paddle.manual_seed(12312321111) - print(gen.initial_seed()) + x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0) st1 = gen.get_state() - print(st1) + x1 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - print(gen.initial_seed()) + gen.set_state(st1) - print(gen.get_state()) + x2 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - print(gen.initial_seed()) + paddle.manual_seed(12312321111) x3 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) @@ -64,8 +64,12 @@ def test_generator_uniform_random_static(self): fluid.disable_dygraph() +<<<<<<< 976fd4b093360e81d01c6aa4fc93397688c45a00 gen = generator.Generator() gen.manual_seed(123123143) +======= + gen = paddle.manual_seed(123123143) +>>>>>>> fix ci problem startup_program = fluid.Program() train_program = fluid.Program() @@ -96,6 +100,7 @@ def test_generator_uniform_random_static(self): self.assertTrue(np.allclose(out1_res2, out2_res2)) self.assertTrue(not np.allclose(out1_res2, out1_res1)) +<<<<<<< 976fd4b093360e81d01c6aa4fc93397688c45a00 def test_gen_dropout_dygraph(self): gen = generator.Generator() @@ -275,6 +280,14 @@ def test_generator_randint_dygraph(self): fluid.enable_dygraph() gen.manual_seed(12312321111) +======= + def test_generator_randint_dygraph(self): + """Test Generator seed.""" + gen = paddle.manual_seed(12312321111) + + fluid.enable_dygraph() + +>>>>>>> fix ci problem x = paddle.randint(low=1) st1 = gen.get_state() x1 = paddle.randint(low=1) @@ -289,6 +302,7 @@ def test_generator_randint_dygraph(self): if not 
core.is_compiled_with_cuda(): self.assertTrue(np.allclose(x1_np, x2_np)) self.assertTrue(np.allclose(x_np, x3_np)) +<<<<<<< 976fd4b093360e81d01c6aa4fc93397688c45a00 def test_generator_ranint_static(self): @@ -520,6 +534,8 @@ def test_gen_TruncatedNormal_initializer(self): self.assertTrue(np.allclose(out1_res1, out2_res1)) self.assertTrue(np.allclose(out1_res2, out2_res2)) self.assertTrue(not np.allclose(out1_res2, out1_res1)) +======= +>>>>>>> fix ci problem if __name__ == "__main__": diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 5cf633aa4b36c..62721915b70f0 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -44,5 +44,7 @@ def manual_seed(seed): fluid.default_main_program().random_seed = seed fluid.default_startup_program().random_seed = seed + program = fluid.Program() + program.global_seed(seed) return core.default_cpu_generator().manual_seed(seed) From 9728487962b9f36f3af962dd8652d8354658c964 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Fri, 21 Aug 2020 03:18:31 +0800 Subject: [PATCH 03/28] fix unittests --- paddle/fluid/operators/uniform_random_op.cu | 14 ++++++-------- .../contrib/tests/test_weight_decay_extend.py | 6 ++++-- .../tests/unittests/dygraph_to_static/test_bmn.py | 4 ++-- .../tests/unittests/dygraph_to_static/test_lac.py | 4 ++-- .../unittests/dygraph_to_static/test_mobile_net.py | 4 ++-- .../unittests/dygraph_to_static/test_ptb_lm.py | 5 ++--- .../test_reinforcement_learning.py | 3 +-- .../unittests/dygraph_to_static/test_resnet.py | 3 +-- .../unittests/dygraph_to_static/test_se_resnet.py | 3 +-- .../unittests/dygraph_to_static/test_simnet.py | 3 +-- .../dygraph_to_static/test_transformer.py | 12 ++++++------ .../tests/unittests/dygraph_to_static/test_tsm.py | 5 ++--- .../test_buffer_shared_memory_reuse_pass.py | 4 ++-- .../fluid/tests/unittests/test_compiled_program.py | 7 +++---- .../tests/unittests/test_decoupled_py_reader.py | 3 +-- 
.../unittests/test_eager_deletion_padding_rnn.py | 3 ++- .../unittests/test_embedding_id_stop_gradient.py | 8 ++++---- python/paddle/fluid/tests/unittests/test_fc_op.py | 4 ++-- .../tests/unittests/test_fuse_all_reduce_pass.py | 1 + .../fluid/tests/unittests/test_fuse_bn_act_pass.py | 3 +-- .../tests/unittests/test_fuse_optimizer_pass.py | 1 + .../tests/unittests/test_generator_dataloader.py | 3 +-- .../fluid/tests/unittests/test_hsigmoid_op.py | 3 ++- .../fluid/tests/unittests/test_imperative_gan.py | 12 +++--------- .../fluid/tests/unittests/test_imperative_gnn.py | 11 +++-------- .../test_imperative_lod_tensor_to_selected_rows.py | 7 +++---- .../test_imperative_ocr_attention_model.py | 6 ++---- .../tests/unittests/test_imperative_optimizer.py | 9 +++------ .../tests/unittests/test_imperative_ptb_rnn.py | 7 +++---- .../test_imperative_ptb_rnn_sorted_gradient.py | 8 ++++---- .../tests/unittests/test_imperative_se_resnext.py | 6 ++---- python/paddle/fluid/tests/unittests/test_layers.py | 6 ++---- python/paddle/incubate/hapi/tests/test_model.py | 10 ++++------ python/paddle/incubate/hapi/tests/test_text.py | 5 +++-- 34 files changed, 82 insertions(+), 111 deletions(-) diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu index c024bb87b09c0..4df1e0ffeb975 100644 --- a/paddle/fluid/operators/uniform_random_op.cu +++ b/paddle/fluid/operators/uniform_random_op.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include #include + #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" @@ -88,15 +89,12 @@ class GPUUniformRandomKernel : public framework::OpKernel { } T* data = tensor->mutable_data(context.GetPlace()); unsigned int seed = static_cast(context.Attr("seed")); - if (framework::Generator::GetInstance()->is_init_py) { - seed = static_cast( - framework::Generator::GetInstance()->GetCurrentSeed()); - } else { - if (seed == 0) { - std::random_device rd; - seed = rd(); - } + + if (seed == 0) { + std::random_device rd; + seed = rd(); } + T min = static_cast(context.Attr("min")); T max = static_cast(context.Attr("max")); unsigned int diag_num = diff --git a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py index 2b331308de5ee..db32602eea5b5 100644 --- a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py +++ b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py @@ -92,9 +92,10 @@ def run_program(self, place, feed_list): return param_sum def check_weight_decay(self, place, model): + paddle.manual_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - startup_prog.random_seed = 1 + with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog): data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) @@ -113,9 +114,10 @@ def check_weight_decay(self, place, model): return param_sum def check_weight_decay2(self, place, model): + paddle.manual_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - startup_prog.random_seed = 1 + with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog): data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py index dd58a49bb55c2..80790d9a36852 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py @@ -560,8 +560,8 @@ def train_bmn(args, place, to_static): loss_data = [] with fluid.dygraph.guard(place): - fluid.default_main_program().random_seed = SEED - fluid.default_startup_program().random_seed = SEED + paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) global local_random local_random = np.random.RandomState(SEED) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py index 0e2bac9fa5b5c..3e85a278c679e 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py @@ -21,6 +21,7 @@ import os os.environ["CUDA_VISIBLE_DEVICES"] = "2" +import paddle import paddle.fluid as fluid from paddle.fluid.dygraph import to_variable from paddle.fluid.dygraph import Embedding, Linear, GRUUnit @@ -448,8 +449,7 @@ def do_train(args, to_static): place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda( ) else fluid.CPUPlace() with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) reader = get_random_input_data(args.batch_size, args.vocab_size, args.num_labels) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py index 5ec3de5871dd6..1d2eb6eb137bf 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py @@ -14,6 +14,7 @@ import time import numpy as np +import paddle import paddle.fluid as fluid from 
paddle.fluid.initializer import MSRA from paddle.fluid.param_attr import ParamAttr @@ -447,8 +448,7 @@ def train_mobilenet(args, to_static): with fluid.dygraph.guard(args.place): np.random.seed(SEED) - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) if args.model == "MobileNetV1": net = MobileNetV1(class_dim=args.class_dim, scale=1.0) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py index 790319936ac01..6786a02b9fac8 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py @@ -19,7 +19,7 @@ import unittest import numpy as np - +import paddle import paddle.fluid as fluid from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator from paddle.fluid.dygraph.base import to_variable @@ -218,8 +218,7 @@ def train(place): batch_num = 200 with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py index 4813930159744..6bbe3d81a883e 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py @@ -64,8 +64,7 @@ def train(args, place, to_static): env.seed(SEED) with fluid.dygraph.guard(place): - fluid.default_main_program().random_seed = SEED - fluid.default_startup_program().random_seed = SEED + paddle.manual_seed(SEED) local_random = np.random.RandomState(SEED) policy = Policy() diff --git 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py index 46eb2b42e9265..674d7dd99ced4 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py @@ -215,8 +215,7 @@ def train(to_static): """ with fluid.dygraph.guard(place): np.random.seed(SEED) - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) train_reader = paddle.batch( reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py index 30cba78fec19c..9746eca9c9709 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py @@ -331,8 +331,7 @@ def train(train_reader, to_static): np.random.seed(SEED) with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) se_resnext = SeResNeXt() optimizer = optimizer_setting(train_parameters, se_resnext.parameters()) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py index 552a6307f3337..d5b3bfef94912 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py @@ -108,8 +108,7 @@ def train(conf_dict, to_static): place = fluid.CPUPlace() with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) conf_dict['dict_size'] = len(vocab) conf_dict['seq_len'] = 
args.seq_len diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py index 7aa465949eb70..735e131c141a6 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py @@ -18,6 +18,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid import transformer_util as util @@ -31,10 +32,10 @@ def train_static(args, batch_generator): + paddle.manual_seed(SEED) train_prog = fluid.Program() startup_prog = fluid.Program() - train_prog.random_seed = SEED - startup_prog.random_seed = SEED + with fluid.program_guard(train_prog, startup_prog): with fluid.unique_name.guard(): # define input and reader @@ -128,8 +129,7 @@ def train_static(args, batch_generator): def train_dygraph(args, batch_generator): with fluid.dygraph.guard(place): if SEED is not None: - fluid.default_main_program().random_seed = SEED - fluid.default_startup_program().random_seed = SEED + paddle.manual_seed(SEED) # define data loader train_loader = fluid.io.DataLoader.from_generator(capacity=10) train_loader.set_batch_generator(batch_generator, places=place) @@ -220,7 +220,7 @@ def train_dygraph(args, batch_generator): def predict_dygraph(args, batch_generator): with fluid.dygraph.guard(place): - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) # define data loader test_loader = fluid.io.DataLoader.from_generator(capacity=10) @@ -291,7 +291,7 @@ def predict_dygraph(args, batch_generator): def predict_static(args, batch_generator): test_prog = fluid.Program() with fluid.program_guard(test_prog): - test_prog.random_seed = SEED + paddle.manual_seed(SEED) # define input and reader input_field_names = util.encoder_data_input_fields + util.fast_decoder_data_input_fields diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py index 13a97fb7478db..45b8083009cde 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py @@ -20,7 +20,7 @@ import sys import time import unittest - +import paddle import paddle.fluid as fluid from paddle.fluid.dygraph import declarative, ProgramTranslator, to_variable from paddle.fluid.dygraph.nn import Conv2D, BatchNorm, Linear, Pool2D @@ -272,8 +272,7 @@ def train(args, fake_data_reader, to_static): random.seed(0) np.random.seed(0) with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = 1000 - fluid.default_main_program().random_seed = 1000 + paddle.manual_seed(1000) video_model = TSM_ResNet("TSM", train_config, 'Train') diff --git a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py index 671efd8c72155..0cbd2c8f8f577 100644 --- a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py +++ b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import paddle import paddle.fluid as fluid from paddle.fluid.framework import Parameter import numpy as np @@ -44,10 +45,9 @@ def setUp(self): def build_program_and_scope(self): self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() + paddle.manual_seed(1) startup_program = fluid.Program() main_program = fluid.Program() - startup_program.random_seed = 1 - main_program.random_seed = 1 scope = fluid.Scope() with fluid.program_guard(main_program, startup_program): diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index 8430f39578047..691890520c6ad 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -16,6 +16,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid import core from test_imperative_base import new_program_scope @@ -29,8 +30,7 @@ def setUp(self): self.label = np.random.randint( low=0, high=10, size=[16, 1], dtype=np.int64) with new_program_scope(): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + paddle.manual_seed(self.seed) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) @@ -46,8 +46,7 @@ def setUp(self): def test_compiled_program_base(self): with new_program_scope(): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + paddle.manual_seed(self.seed) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py index a16f21c0f97c0..9fd8fa3b8bd9e 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py +++ 
b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py @@ -34,10 +34,9 @@ def random_reader(): def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): + paddle.manual_seed(1) startup_prog = fluid.Program() main_prog = fluid.Program() - startup_prog.random_seed = 1 - main_prog.random_seed = 1 with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py index 6c0bb97bf6f14..e0c0277270b40 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py @@ -18,6 +18,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.layers as layers @@ -465,9 +466,9 @@ def set_customed_config(self): pass def _prepare_program(self, config, parallel=True): + paddle.manual_seed(config.random_seed) self.main_program = fluid.Program() self.startup_program = fluid.Program() - self.startup_program.random_seed = config.random_seed with fluid.program_guard(self.main_program, self.startup_program): with fluid.unique_name.guard(): res_vars = lm_model( diff --git a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py index 5a562dc14650a..e33cd81bbe1a2 100644 --- a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py @@ -13,6 +13,7 @@ # limitations under the License. 
import numpy as np +import paddle import paddle.fluid as fluid import six import unittest @@ -37,13 +38,12 @@ def test_check_grad(self): self.assertTrue(np.array_equal(grad_value1, grad_value2)) def run_program(self, place, stop_gradient=False): + np.random.seed(1) + paddle.manual_seed(1) + startup_program = fluid.Program() main_program = fluid.Program() - np.random.seed(1) - startup_program.random_seed = 1 - main_program.random_seed = 1 - scope = fluid.Scope() with fluid.program_guard(main_program, startup_program): with fluid.scope_guard(scope): diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py index e5a7e6c702aec..f463353bf1f31 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -13,6 +13,7 @@ # limitations under the License. import unittest +import paddle import numpy as np from op_test import OpTest import paddle.fluid as fluid @@ -135,10 +136,9 @@ def config(self): class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): def test_api(self): + paddle.manual_seed(SEED) startup_program = Program() main_program = Program() - startup_program.random_seed = SEED - main_program.random_seed = SEED with program_guard(main_program, startup_program): input = np.random.random([2, 2, 25]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py index 06f8da84a28d2..47671ab3a85e8 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net from fake_reader import fake_imdb_reader from parallel_executor_test_base import TestParallelExecutorBase diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py index 62eef67a5695f..ebfcf4c244d0c 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py @@ -19,8 +19,7 @@ class TestFuseBatchNormActPass(unittest.TestCase): def build_program(self, main_program, startup_program, use_cuda, seed=1): - main_program.random_seed = seed - startup_program.random_seed = seed + paddle.manual_seed(seed) with fluid.program_guard(main_program, startup_program): x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') y = fluid.layers.data(name="y", shape=[1], dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py index b47bcd2a032a3..a22daeedd09e9 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net from fake_reader import fake_imdb_reader from parallel_executor_test_base import TestParallelExecutorBase diff --git a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py index 4f0beb8c0dcd5..e1e0edc52629a 100644 --- a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py +++ b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py @@ -35,10 +35,9 @@ def random_reader(): def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): + paddle.manual_seed(1) startup_prog = fluid.Program() main_prog = fluid.Program() - startup_prog.random_seed = 1 - main_prog.random_seed = 1 with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py index 5777bb3c6f5e3..5c9867e681524 100644 --- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py @@ -16,6 +16,7 @@ import unittest import numpy as np +import paddle import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid import Program, program_guard @@ -266,8 +267,8 @@ def hs_net_conf(self, is_sparse): def training_test(self, is_sparse): with fluid.program_guard(fluid.Program(), fluid.Program()): + paddle.manual_seed(1) start_up = fluid.default_startup_program() - start_up.random_seed = 1 # Fix random seed x = np.arange(6).reshape(6) path_table = np.array([(1, 2, -1), (1, 2, -1)]).astype('int64') path_code = np.array([(1, 0, -1), (0, 0, -1)]).astype('int64') diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py index 80bdf2ea8a898..881e74a9fae44 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_gan.py @@ -56,13 +56,10 @@ def forward(self, inputs): class TestDygraphGAN(unittest.TestCase): def test_gan_float32(self): seed = 90 - + paddle.manual_seed(1) startup = fluid.Program() - startup.random_seed = seed discriminate_p = fluid.Program() generate_p = fluid.Program() - discriminate_p.random_seed = seed - generate_p.random_seed = seed scope = fluid.core.Scope() with new_program_scope( @@ -133,8 +130,7 @@ def test_gan_float32(self): dy_params = dict() with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(1) discriminator = Discriminator() generator = Generator() @@ -177,10 +173,8 @@ def test_gan_float32(self): dy_params2 = dict() with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) - + paddle.manual_seed(1) discriminator2 = Discriminator() generator2 = Generator() sgd2 = SGDOptimizer( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py index 01f3c02774698..533db6b12c936 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py @@ -61,12 +61,9 @@ def forward(self, x, adj): class TestDygraphGNN(unittest.TestCase): def test_gnn_float32(self): - seed = 90 - + paddle.manual_seed(90) startup = fluid.Program() - startup.random_seed = seed main = fluid.Program() - main.random_seed = seed scope = fluid.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): @@ -114,8 +111,7 @@ def test_gnn_float32(self): scope.find_var(model.gc.weight.name).get_tensor()) with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(90) 
features = np.ones([1, 100, 50], dtype=np.float32) # Use selected rows when it's supported. @@ -140,8 +136,7 @@ def test_gnn_float32(self): model_gc_weight_value = model.gc.weight.numpy() with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(90) features2 = np.ones([1, 100, 50], dtype=np.float32) # Use selected rows when it's supported. diff --git a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py index 6349d71760934..2e962844c236a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -94,8 +95,7 @@ def simple_net_float32(self, is_sparse, dtype): for is_sort_sum_gradient in [True, False]: with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, @@ -139,8 +139,7 @@ def simple_net_float32(self, is_sparse, dtype): dy_loss_value = dy_loss.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index 499a4311f6e17..d92e96ecb2feb 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -401,9 +401,8 @@ def test_while_op(self): dtype='int64').reshape([1, Config.max_length]))) with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) ocr_attention = OCRAttention() if Config.learning_rate_decay == "piecewise_decay": @@ -453,8 +452,7 @@ def test_while_op(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) ocr_attention = OCRAttention() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index a7783afc5cff3..71e1e8e081cab 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -74,8 +74,7 @@ def _check_exception(self, exception_message, place=None): with fluid.dygraph.guard(place): try: - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( parameter_list=mlp.parameters()) @@ -91,8 +90,7 @@ def _check_mlp(self, place=None): ) else fluid.CUDAPlace(0) with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( @@ -132,8 +130,7 @@ def _check_mlp(self, place=None): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - 
fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) if place == None: place = fluid.CPUPlace() if not core.is_compiled_with_cuda( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index bd629f5f4a69a..bf5fc13617460 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -225,8 +226,7 @@ def ptb_rnn_cpu_float32(self, is_sparse): traced_layer = None with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -293,8 +293,7 @@ def ptb_rnn_cpu_float32(self, is_sparse): dy_last_hidden_value = last_hidden.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py index 526c1706e2d08..a02b7db61c2df 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -43,9 +44,9 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): batch_num = 200 with 
fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) + # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -93,8 +94,7 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): dy_last_hidden_value = last_hidden.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index 283addaf6283a..e22a91f848331 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -308,8 +308,7 @@ def test_se_resnext_float32(self): batch_num = 1 epoch_num = 1 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) se_resnext = SeResNeXt() optimizer = optimizer_setting( @@ -367,8 +366,7 @@ def test_se_resnext_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 1992a3bb39807..d862d40dc1a51 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -57,8 +57,7 @@ def _get_place(self, force_to_use_cpu=False): @contextlib.contextmanager def 
static_graph(self): with new_program_scope(): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + paddle.manual_seed(self.seed) yield def get_static_graph_result(self, @@ -77,8 +76,7 @@ def get_static_graph_result(self, def dynamic_graph(self, force_to_use_cpu=False): with fluid.dygraph.guard( self._get_place(force_to_use_cpu=force_to_use_cpu)): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + paddle.manual_seed(self.seed) yield diff --git a/python/paddle/incubate/hapi/tests/test_model.py b/python/paddle/incubate/hapi/tests/test_model.py index 8e0c051ee8c39..756a7ac226423 100644 --- a/python/paddle/incubate/hapi/tests/test_model.py +++ b/python/paddle/incubate/hapi/tests/test_model.py @@ -22,6 +22,7 @@ import shutil import tempfile +import paddle from paddle import fluid from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential, Softmax from paddle.fluid.dygraph.base import to_variable @@ -170,8 +171,7 @@ def setUpClass(cls): cls.test_dataset, places=cls.device, batch_size=64) seed = 333 - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) dy_lenet = LeNetDygraph() cls.init_param = dy_lenet.state_dict() @@ -222,8 +222,7 @@ def test_prepare_context(self): def fit(self, dynamic, num_replicas=None, rank=None): fluid.enable_dygraph(self.device) if dynamic else None seed = 333 - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) net = LeNet(classifier_activation=None) optim_new = fluid.optimizer.Adam( @@ -327,8 +326,7 @@ def forward(self, x): class TestModelFunction(unittest.TestCase): def set_seed(self, seed=1024): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) def test_train_batch(self, dynamic=True): 
dim = 20 diff --git a/python/paddle/incubate/hapi/tests/test_text.py b/python/paddle/incubate/hapi/tests/test_text.py index bdc637997b0cb..b2f8e7d314631 100644 --- a/python/paddle/incubate/hapi/tests/test_text.py +++ b/python/paddle/incubate/hapi/tests/test_text.py @@ -20,6 +20,7 @@ import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid.dygraph import Embedding, Linear, Layer from paddle.fluid.layers import BeamSearchDecoder @@ -87,8 +88,8 @@ def _calc_output(self, place, mode="test", dygraph=True): fluid.enable_dygraph(place) else: fluid.disable_dygraph() - fluid.default_main_program().random_seed = self._random_seed - fluid.default_startup_program().random_seed = self._random_seed + paddle.manual_seed(self._random_seed) + layer = self.model_cls(**self.attrs) if isinstance( self.attrs, dict) else self.model_cls(*self.attrs) model = Model(layer, inputs=self.make_inputs()) From e78ce915ee78b877f029876a3d1b81d7f635027d Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Fri, 21 Aug 2020 03:51:31 +0800 Subject: [PATCH 04/28] fix unittest --- .../unittests/dygraph_to_static/test_reinforcement_learning.py | 1 + python/paddle/fluid/tests/unittests/test_compiled_program.py | 3 +-- .../tests/unittests/test_imperative_ocr_attention_model.py | 1 + python/paddle/fluid/tests/unittests/test_while_op.py | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py index 6bbe3d81a883e..8d9ee61f5ec75 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py @@ -16,6 +16,7 @@ import math import itertools import numpy as np +import paddle import paddle.fluid as fluid import paddle.fluid.dygraph.nn as nn from paddle.fluid.dygraph import to_variable, Layer diff 
--git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index 691890520c6ad..39eec7fc3364b 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -63,8 +63,7 @@ def test_compiled_program_base(self): def test_compiled_program_with_data_parallel(self): with new_program_scope(): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + paddle.manual_seed(self.seed) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index d92e96ecb2feb..a0e898b14f272 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -16,6 +16,7 @@ import unittest import numpy as np import six +import paddle import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, BatchNorm, Embedding, GRUUnit diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index 207ff66a0f877..ee01bfb21f820 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid.layers as layers from paddle.fluid.executor import Executor import paddle.fluid.core as core From 753da8bf00a1fe6ce7c578bc6be5230184269854 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Fri, 21 Aug 2020 09:18:05 +0800 Subject: [PATCH 05/28] set is_init_py=false in manual_seed --- paddle/fluid/framework/generator.cc | 9 
++++++- paddle/fluid/framework/generator.h | 9 +++++-- paddle/fluid/operators/uniform_random_op.cc | 13 +--------- paddle/fluid/pybind/generator_py.cc | 4 ++- .../fluid/tests/unittests/test_fc_op.py | 1 - .../fluid/tests/unittests/test_random_seed.py | 26 ++++--------------- python/paddle/framework/random.py | 2 ++ 7 files changed, 26 insertions(+), 38 deletions(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index 00af213a98cd6..6c84324474963 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -14,6 +14,8 @@ limitations under the License. */ #include "paddle/fluid/framework/generator.h" +#include + #include #include #include @@ -26,7 +28,6 @@ namespace framework { const std::shared_ptr& DefaultCPUGenerator() { static auto default_cpu_generator = std::make_shared(GetRandomSeed()); - default_cpu_generator->is_init_py = true; return default_cpu_generator; } @@ -79,5 +80,11 @@ uint64_t Generator::Random64() { return this->state_->cpu_engine(); } +void Generator::SetIsInitPy(bool is_init_py) { + this->is_init_py_ = is_init_py; + VLOG(4) << "SetIsInitPy:" << this->is_init_py_; +} +bool Generator::GetIsInitPy() const { return this->is_init_py_; } + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/generator.h b/paddle/fluid/framework/generator.h index 848fe98a253e5..b452cac77f456 100644 --- a/paddle/fluid/framework/generator.h +++ b/paddle/fluid/framework/generator.h @@ -56,6 +56,7 @@ struct Generator { std::seed_seq seq({seed}); default_gen_state_cpu.cpu_engine = std::mt19937_64(seq); this->state_ = std::make_shared(default_gen_state_cpu); + this->is_init_py_ = true; // TODO(zhiqiu): remove it in future } explicit Generator(GeneratorState state_in) : state_{std::make_shared(state_in)} {} @@ -70,7 +71,6 @@ struct Generator { uint64_t GetCurrentSeed(); // random a seed and get uint64_t Seed(); - // set seed void SetCurrentSeed(uint64_t seed); // get cpu 
engine @@ -80,7 +80,8 @@ struct Generator { uint64_t Random64(); - bool is_init_py = false; + void SetIsInitPy(bool); + bool GetIsInitPy() const; private: std::shared_ptr state_; @@ -88,6 +89,10 @@ struct Generator { Generator(const Generator& other, const std::lock_guard&) : state_(std::make_shared(*(other.state_))) {} + // NOTE(zhiqiu): is_init_py_ is used to make generator be compatible with old + // seed, and it should be removed after all random-related operators and + // unittests upgrades to use generator. + bool is_init_py_ = false; }; const std::shared_ptr& DefaultCPUGenerator(); diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 32dc29e46be55..885c0d57d2d53 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -65,13 +65,8 @@ class CPUUniformRandomKernel : public framework::OpKernel { static_cast(ctx.Attr("min")), static_cast(ctx.Attr("max"))); auto gen_ptr = framework::DefaultCPUGenerator(); - if (gen_ptr->is_init_py) { + if (gen_ptr->GetIsInitPy()) { std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine(); - // auto gen_engine = gen_ptr_->GetCPUEngine(); - // std::uniform_real_distribution dist( - // static_cast(ctx.Attr("min")), - // static_cast(ctx.Attr("max"))); - for (int64_t i = 0; i < size; ++i) { data[i] = dist(gen_engine); } @@ -82,16 +77,10 @@ class CPUUniformRandomKernel : public framework::OpKernel { seed = std::random_device()(); } engine.seed(seed); - // std::uniform_real_distribution dist( - // static_cast(ctx.Attr("min")), - // static_cast(ctx.Attr("max"))); - // int64_t size = tensor->numel(); for (int64_t i = 0; i < size; ++i) { data[i] = dist(engine); } } - // std::mt19937_64 &engine = gen_ptr->GetCPUEngine(); - // auto engine = gen_ptr_->GetCPUEngine(); unsigned int diag_num = static_cast(ctx.Attr("diag_num")); diff --git a/paddle/fluid/pybind/generator_py.cc b/paddle/fluid/pybind/generator_py.cc index 
3ad672db2ecd9..922bdddc1f8ff 100644 --- a/paddle/fluid/pybind/generator_py.cc +++ b/paddle/fluid/pybind/generator_py.cc @@ -57,7 +57,9 @@ void BindGenerator(py::module* m_ptr) { .def("random", &framework::Generator::Random64) .def("get_cpu_engine", &framework::Generator::GetCPUEngine, py::return_value_policy::move) - .def("set_cpu_engine", &framework::Generator::SetCPUEngine); + .def("set_cpu_engine", &framework::Generator::SetCPUEngine) + .def_property("_is_init_py", &framework::Generator::GetIsInitPy, + &framework::Generator::SetIsInitPy); m.def("default_cpu_generator", &framework::DefaultCPUGenerator); } // end Generator } // end namespace pybind diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py index f463353bf1f31..a0514b62e53f6 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -158,7 +158,6 @@ def test_api(self): res_1, res_2 = exe.run(main_program, feed={"x": input}, fetch_list=[out_1, out_2]) - assert np.array_equal(res_1, res_2) diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index 7160c591351bf..1310ef01f04bd 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -36,18 +36,18 @@ def test_generator_uniform_random_dygraph(self): fluid.enable_dygraph() gen = paddle.manual_seed(12312321111) - + print(gen.initial_seed()) x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0) st1 = gen.get_state() - + print(st1) x1 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - + print(gen.initial_seed()) gen.set_state(st1) - + print(gen.get_state()) x2 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - + print(gen.initial_seed()) paddle.manual_seed(12312321111) x3 = fluid.layers.uniform_random( [10], dtype="float32", 
min=0.0, max=1.0) @@ -64,12 +64,8 @@ def test_generator_uniform_random_static(self): fluid.disable_dygraph() -<<<<<<< 976fd4b093360e81d01c6aa4fc93397688c45a00 gen = generator.Generator() gen.manual_seed(123123143) -======= - gen = paddle.manual_seed(123123143) ->>>>>>> fix ci problem startup_program = fluid.Program() train_program = fluid.Program() @@ -100,7 +96,6 @@ def test_generator_uniform_random_static(self): self.assertTrue(np.allclose(out1_res2, out2_res2)) self.assertTrue(not np.allclose(out1_res2, out1_res1)) -<<<<<<< 976fd4b093360e81d01c6aa4fc93397688c45a00 def test_gen_dropout_dygraph(self): gen = generator.Generator() @@ -280,14 +275,6 @@ def test_generator_randint_dygraph(self): fluid.enable_dygraph() gen.manual_seed(12312321111) -======= - def test_generator_randint_dygraph(self): - """Test Generator seed.""" - gen = paddle.manual_seed(12312321111) - - fluid.enable_dygraph() - ->>>>>>> fix ci problem x = paddle.randint(low=1) st1 = gen.get_state() x1 = paddle.randint(low=1) @@ -302,7 +289,6 @@ def test_generator_randint_dygraph(self): if not core.is_compiled_with_cuda(): self.assertTrue(np.allclose(x1_np, x2_np)) self.assertTrue(np.allclose(x_np, x3_np)) -<<<<<<< 976fd4b093360e81d01c6aa4fc93397688c45a00 def test_generator_ranint_static(self): @@ -534,8 +520,6 @@ def test_gen_TruncatedNormal_initializer(self): self.assertTrue(np.allclose(out1_res1, out2_res1)) self.assertTrue(np.allclose(out1_res2, out2_res2)) self.assertTrue(not np.allclose(out1_res2, out1_res1)) -======= ->>>>>>> fix ci problem if __name__ == "__main__": diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 62721915b70f0..c8bb4eb7da5ab 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -47,4 +47,6 @@ def manual_seed(seed): program = fluid.Program() program.global_seed(seed) + core.default_cpu_generator()._is_init_py = False + print(core.default_cpu_generator()._is_init_py) return 
core.default_cpu_generator().manual_seed(seed) From f35eb9b94badab54fb3d1dda7f1b8276834ca765 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Fri, 21 Aug 2020 13:15:27 +0800 Subject: [PATCH 06/28] fix unittest --- .../fluid/tests/unittests/test_imperative_resnet.py | 6 ++---- .../test_imperative_resnet_sorted_gradient.py | 6 ++---- .../fluid/tests/unittests/test_uniform_random_op.py | 12 +++++++----- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 815437072fde2..7602994fcecee 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -251,8 +251,7 @@ def test_resnet_float32(self): traced_layer = None with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) resnet = ResNet() optimizer = optimizer_setting( @@ -334,8 +333,7 @@ def test_resnet_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index d26d6f25aa8ff..f9ec003cc9c64 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -77,9 +77,8 @@ def test_resnet_sort_gradient_float32(self): batch_size = train_parameters["batch_size"] batch_num = 10 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - 
fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) resnet = ResNet() optimizer = optimizer_setting( train_parameters, parameter_list=resnet.parameters()) @@ -136,8 +135,7 @@ def test_resnet_sort_gradient_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 158462a1e6e10..69a5ad3a0395e 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -21,6 +21,7 @@ from op_test import OpTest import paddle import paddle.fluid.core as core +import paddle from paddle.fluid.op import Operator import paddle.fluid as fluid from paddle.fluid import Program, program_guard @@ -259,15 +260,15 @@ def check_with_place(self, place): op = Operator( "uniform_random", Out="X", - shape=[4, 784], + shape=[100, 784], min=-5.0, max=10.0, seed=10, - diag_num=4, + diag_num=100, diag_step=784, diag_val=1.0) op.run(scope, place) - self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist_diag(np.array(out.get_tensor())) self.assertTrue( np.allclose( @@ -347,6 +348,7 @@ def test_attr_tensor_int32_API(self): class TestUniformRandomOp_API_seed(unittest.TestCase): def test_attr_tensor_API(self): + paddle.fluid.core.default_cpu_generator()._is_init_py = False startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): @@ -386,7 +388,7 @@ def check_with_place(self, place): scope = core.Scope() out = 
scope.var("X").get_selected_rows() shape_tensor = scope.var("Shape").get_tensor() - shape_tensor.set(np.array([4, 784]).astype("int64"), place) + shape_tensor.set(np.array([100, 784]).astype("int64"), place) op = Operator( "uniform_random", @@ -396,7 +398,7 @@ def check_with_place(self, place): max=10.0, seed=10) op.run(scope, place) - self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( From d6524a4a0279e5e585626eb27c74b23119b095c0 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Sat, 22 Aug 2020 11:32:59 +0800 Subject: [PATCH 07/28] fix bernoulli_op --- paddle/fluid/operators/bernoulli_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/bernoulli_op.cc b/paddle/fluid/operators/bernoulli_op.cc index c525da5953d76..0e81f7accb549 100644 --- a/paddle/fluid/operators/bernoulli_op.cc +++ b/paddle/fluid/operators/bernoulli_op.cc @@ -64,7 +64,7 @@ class BernoulliOpKernel int64_t size = x->numel(); std::uniform_real_distribution dist(0.0, 1.0); - auto gen_ptr = framework::Generator::GetInstance(); + auto gen_ptr = framework::DefaultCPUGenerator(); std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine(); for (int64_t i = 0; i < size; ++i) { From ffab8b3581d4e32eeb80c301df44626a0fc40379 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Sun, 23 Aug 2020 01:58:56 +0800 Subject: [PATCH 08/28] fix(unittest): change random_seed to manual_seed --- .../fluid/tests/unittests/parallel_executor_test_base.py | 5 +++-- .../test_imperative_selected_rows_to_lod_tensor.py | 7 +++---- .../test_imperative_star_gan_with_gradient_penalty.py | 8 ++------ .../tests/unittests/test_ir_memory_optimize_ifelse_op.py | 3 +-- python/paddle/fluid/tests/unittests/test_py_func_op.py | 3 +-- python/paddle/fluid/tests/unittests/test_regularizer.py | 7 ++++--- 6 files changed, 14 insertions(+), 19 deletions(-) diff --git 
a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index ec6b81f138321..3c760e154ba20 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -17,6 +17,7 @@ import multiprocessing import os import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import compiler @@ -64,10 +65,10 @@ def run_executor(exe, binary, feed, fetch_list): feed_data_reader, FeedDataReader ), "feed_data_reader must be type of FeedDataReader" + paddle.manual_seed(1) main = fluid.Program() startup = fluid.Program() - startup.random_seed = 1 - main.random_seed = 1 + with fluid.program_guard(main, startup): feed_dict, loss = cls.build_model(feed_dict, get_data_from_feeder, main, method, optimizer) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py index 3765cb784d652..5b612d2267635 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -101,8 +102,7 @@ def simple_net_float(self, is_sparse, dtype): for is_sort_sum_gradient in [True, False]: traced_layer = None with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, @@ -145,8 +145,7 @@ def simple_net_float(self, is_sparse, dtype): dy_loss_value = dy_loss.numpy() with new_program_scope(): - 
fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py index d603a7d6ca0de..f3dd72bfca906 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py @@ -468,8 +468,7 @@ def build_optimizer(layer, cfg, loss=None): class DyGraphTrainModel(object): def __init__(self, cfg): - fluid.default_startup_program().random_seed = cfg.seed - fluid.default_main_program().random_seed = cfg.seed + paddle.manual_seed(1) self.generator = Generator(cfg) self.discriminator = Discriminator(cfg) @@ -529,12 +528,11 @@ def create_data_layer(): shape=[None, cfg.c_dim], dtype='float32', name='label_trg') return image_real, label_org, label_trg + paddle.manual_seed(cfg.seed) self.gen_program = fluid.Program() gen_startup_program = fluid.Program() with fluid.program_guard(self.gen_program, gen_startup_program): - self.gen_program.random_seed = cfg.seed - gen_startup_program.random_seed = cfg.seed with fluid.unique_name.guard(): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) @@ -546,8 +544,6 @@ def create_data_layer(): self.dis_program = fluid.Program() dis_startup_program = fluid.Program() with fluid.program_guard(self.dis_program, dis_startup_program): - self.dis_program.random_seed = cfg.seed - dis_startup_program.random_seed = cfg.seed with fluid.unique_name.guard(): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py index 
c5228fcf12274..4f4622bf16ad4 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py @@ -37,10 +37,9 @@ def check_network_convergence(self, use_cuda=True, use_mem_opt=False, iter_num=5): + paddle.manual_seed(100) prog = Program() startup_prog = Program() - prog.random_seed = 100 - startup_prog.random_seed = 100 with program_guard(prog, startup_prog): image = layers.data(name='x', shape=[784], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_py_func_op.py b/python/paddle/fluid/tests/unittests/test_py_func_op.py index 6045f2d713627..b63e240c805f3 100644 --- a/python/paddle/fluid/tests/unittests/test_py_func_op.py +++ b/python/paddle/fluid/tests/unittests/test_py_func_op.py @@ -147,8 +147,7 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.core.Scope()): - fluid.default_main_program().random_seed = 1 - fluid.default_startup_program().random_seed = 1 + paddle.manual_seed(1) np.random.seed(1) img = fluid.layers.data(name='image', shape=[784], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 58b407f8bc1f4..6d288ea6e08b9 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -169,9 +169,9 @@ def run_program(self, place, feed_list): return param_sum def check_l2decay_regularizer(self, place, model): + paddle.manual_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - startup_prog.random_seed = 1 with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog): data = fluid.layers.data( @@ -188,9 +188,10 @@ def check_l2decay_regularizer(self, place, model): return param_sum def check_l2decay(self, place, model): + 
paddle.manual_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - startup_prog.random_seed = 1 + with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog): data = fluid.layers.data( @@ -243,7 +244,7 @@ def test_repeated_regularization(self): with fluid.dygraph.guard(): input = fluid.dygraph.to_variable( np.random.randn(3, 5).astype('float32')) - fluid.default_main_program().random_seed = 1 + paddle.manual_seed(1) linear1 = fluid.dygraph.Linear( 5, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr) From a5c4cc8846993ee7409719433862c722747905b9 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Sun, 23 Aug 2020 10:32:46 +0800 Subject: [PATCH 09/28] =?UTF-8?q?=F0=9F=90=9Efix(unittest):=20fix=20manual?= =?UTF-8?q?=5Fseed?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tests/unittests/test_fuse_bn_act_pass.py | 2 +- .../tests/unittests/test_imperative_deepcf.py | 10 +++------- .../test_imperative_reinforcement.py | 6 ++---- .../unittests/test_imperative_save_load.py | 20 ++++++------------- ..._imperative_transformer_sorted_gradient.py | 8 ++++---- .../tests/unittests/test_uniform_random_op.py | 4 ++-- 6 files changed, 18 insertions(+), 32 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py index ebfcf4c244d0c..85e5513f8fb2d 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py @@ -19,7 +19,6 @@ class TestFuseBatchNormActPass(unittest.TestCase): def build_program(self, main_program, startup_program, use_cuda, seed=1): - paddle.manual_seed(seed) with fluid.program_guard(main_program, startup_program): x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') y = fluid.layers.data(name="y", shape=[1], dtype='int64') @@ -58,6 +57,7 @@ def build_program(self, main_program, 
startup_program, use_cuda, seed=1): return x, y, loss def check(self, place, use_cuda): + paddle.manual_seed(1) main_program = fluid.Program() startup_program = fluid.Program() x, y, loss = self.build_program(main_program, startup_program, use_cuda) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py index af71d9d27b9a3..2f3f98e5dbcf9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py @@ -206,11 +206,9 @@ def test_deefcf(self): else: (users_np, items_np, labels_np, num_users, num_items, matrix) = get_data() - + paddle.manual_seed(seed) startup = fluid.Program() - startup.random_seed = seed main = fluid.Program() - main.random_seed = seed scope = fluid.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): @@ -244,8 +242,7 @@ def test_deefcf(self): sys.stderr.write('static loss %s\n' % static_loss) with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) deepcf = DeepCF(num_users, num_items, matrix) adam = fluid.optimizer.AdamOptimizer( @@ -269,8 +266,7 @@ def test_deefcf(self): sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss)) with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) deepcf2 = DeepCF(num_users, num_items, matrix) adam2 = fluid.optimizer.AdamOptimizer( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py index 735ec4d3f1ea8..935c28dbebb7f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py @@ -64,8 +64,7 @@ def test_mnist_float32(self): mask 
= np.array(mask_list).astype("float32") with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) policy = Policy(input_size=4) @@ -105,8 +104,7 @@ def test_mnist_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py index eb9dc926c8207..fba793a83bd8f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py @@ -219,8 +219,7 @@ def setUp(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -305,8 +304,7 @@ def testLoadAndSetVarBase(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -414,8 +412,7 @@ def testSetVariable(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -521,8 +518,7 @@ def testSetNumpy(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = 
seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -634,8 +630,6 @@ def testSetVariableBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -713,8 +707,7 @@ def testLoadAndSetVarBaseBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -804,8 +797,7 @@ def testSetNumpyBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py index c59ce44ec96a8..35418e0ac8db0 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid from paddle.fluid import Embedding, LayerNorm, Linear, Layer from paddle.fluid.dygraph import to_variable, guard @@ -949,9 +950,9 @@ def transformer_sort_gradient_float32(self, is_sparse): seed = 90 with guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) + transformer = TransFormer( 
ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size, @@ -1034,8 +1035,7 @@ def transformer_sort_gradient_float32(self, is_sparse): dy_token_num_value = dy_token_num.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) transformer = TransFormer( ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 69a5ad3a0395e..9677f1362604d 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -420,7 +420,7 @@ def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() shape_1 = scope.var("shape1").get_tensor() - shape_1.set(np.array([4]).astype("int64"), place) + shape_1.set(np.array([100]).astype("int64"), place) shape_2 = scope.var("shape2").get_tensor() shape_2.set(np.array([784]).astype("int64"), place) @@ -432,7 +432,7 @@ def check_with_place(self, place): max=10.0, seed=10) op.run(scope, place) - self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( From 8ded999b9d824c8d172e835e7c748be8cd2aff1d Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Sun, 23 Aug 2020 22:17:19 +0800 Subject: [PATCH 10/28] trigger ci --- python/paddle/dataset/tests/test_sentiment.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/paddle/dataset/tests/test_sentiment.py b/python/paddle/dataset/tests/test_sentiment.py index bb9830132e987..3540ea06b075e 100644 --- a/python/paddle/dataset/tests/test_sentiment.py +++ b/python/paddle/dataset/tests/test_sentiment.py @@ -42,9 +42,11 @@ def test_sort_files(self): def test_data_set(self): data_set = 
st.load_sentiment_data() last_label = -1 + for each in st.test(): self.assertNotEqual(each[1], last_label) last_label = each[1] + self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES) self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES) self.assertEqual( From 95d6e064900bf6098548d7af4bfd7d975c27eaf0 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Sun, 23 Aug 2020 23:06:09 +0800 Subject: [PATCH 11/28] fix test_sentiment --- .../fluid/tests/unittests/dygraph_to_static/test_sentiment.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py index fd5a58be26be4..e46fc726287d5 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py @@ -15,6 +15,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Conv2D, Linear, Embedding from paddle.fluid.dygraph import to_variable, ProgramTranslator, declarative @@ -285,8 +286,7 @@ def train(args, to_static): with fluid.dygraph.guard(place): np.random.seed(SEED) - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) train_reader = fake_data_reader(args.class_num, args.vocab_size, args.batch_size, args.padding_size) From 987243d669d85b2b630e587a5772e446e011fe81 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Sun, 23 Aug 2020 23:24:19 +0800 Subject: [PATCH 12/28] fix test_imperative_save_load --- .../unittests/test_imperative_optimizer_v2.py | 9 +++----- .../unittests/test_imperative_save_load_v2.py | 21 +++++++------------ 2 files changed, 10 insertions(+), 20 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py 
index 9f75c92b185ed..2481f024fdab8 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py @@ -74,8 +74,7 @@ def _check_exception(self, exception_message, place=None): with fluid.dygraph.guard(place): try: - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( parameter_list=mlp.parameters()) @@ -91,8 +90,7 @@ def _check_mlp(self, place=None): ) else fluid.CUDAPlace(0) with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( @@ -132,8 +130,7 @@ def _check_mlp(self, place=None): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) if place == None: place = fluid.CPUPlace() if not core.is_compiled_with_cuda( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py index 4ab35a21aff43..d7e04ea8c97c4 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py @@ -219,8 +219,7 @@ def setUp(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -305,8 +304,7 @@ def testLoadAndSetVarBase(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + 
paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -414,8 +412,7 @@ def testSetVariable(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -521,8 +518,7 @@ def testSetNumpy(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -634,8 +630,7 @@ def testSetVariableBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -713,8 +708,7 @@ def testLoadAndSetVarBaseBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -804,8 +798,7 @@ def testSetNumpyBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, From e3fe880d6d4339fbdd169d17e9c345035efe2869 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Mon, 24 Aug 2020 01:38:28 +0800 Subject: [PATCH 13/28] fix test_uniform_random_op --- .../tests/unittests/test_uniform_random_op.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git 
a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 9677f1362604d..345dced9e2e77 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -239,12 +239,12 @@ def check_with_place(self, place): op = Operator( "uniform_random", Out="X", - shape=[4, 784], + shape=[100, 784], min=-5.0, max=10.0, seed=10) op.run(scope, place) - self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( @@ -457,21 +457,21 @@ def test_errors(self): def test_Variable(): x1 = fluid.create_lod_tensor( - np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace()) + np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()) fluid.layers.uniform_random_batch_size_like(x1) self.assertRaises(TypeError, test_Variable) def test_shape(): x1 = fluid.layers.data( - name='x2', shape=[4, 784], dtype='float32') + name='x2', shape=[100, 784], dtype='float32') fluid.layers.uniform_random_batch_size_like(x1, shape="shape") self.assertRaises(TypeError, test_shape) def test_dtype(): x2 = fluid.layers.data( - name='x2', shape=[4, 784], dtype='float32') + name='x2', shape=[100, 784], dtype='float32') fluid.layers.uniform_random_batch_size_like(x2, 'int32') self.assertRaises(TypeError, test_dtype) @@ -497,20 +497,20 @@ def test_errors(self): def test_Variable(): x1 = fluid.create_lod_tensor( - np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace()) + np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()) paddle.tensor.random.uniform(x1) self.assertRaises(TypeError, test_Variable) def test_Variable2(): - x1 = np.zeros((4, 784)) + x1 = np.zeros((100, 784)) paddle.tensor.random.uniform(x1) self.assertRaises(TypeError, test_Variable2) def test_dtype(): x2 = fluid.layers.data( - name='x2', shape=[4, 784], 
dtype='float32') + name='x2', shape=[100, 784], dtype='float32') paddle.tensor.random.uniform(x2, 'int32') self.assertRaises(TypeError, test_dtype) From f2f20a57d7cda56aab7852e169d2e0098347a633 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Mon, 24 Aug 2020 09:53:07 +0800 Subject: [PATCH 14/28] fix test_uniform_random_op --- .../tests/unittests/test_uniform_random_op.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 345dced9e2e77..b5ca1468a0c35 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -235,7 +235,7 @@ def test_check_output(self): def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() - + paddle.manual_seed(10) op = Operator( "uniform_random", Out="X", @@ -256,7 +256,7 @@ class TestUniformRandomOpSelectedRowsWithDiagInit( def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() - + paddle.manual_seed(10) op = Operator( "uniform_random", Out="X", @@ -277,6 +277,7 @@ def check_with_place(self, place): class TestUniformRandomOpApi(unittest.TestCase): def test_api(self): + paddle.manual_seed(10) x = fluid.layers.data('x', shape=[16], dtype='float32', lod_level=1) y = fluid.layers.fc(x, size=16, @@ -348,13 +349,14 @@ def test_attr_tensor_int32_API(self): class TestUniformRandomOp_API_seed(unittest.TestCase): def test_attr_tensor_API(self): - paddle.fluid.core.default_cpu_generator()._is_init_py = False + _seed = 10 + paddle.manual_seed(_seed) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): _min = 5 _max = 10 - _seed = 10 + ret = fluid.layers.nn.uniform_random( [2, 3, 2], min=_min, max=_max, seed=_seed) ret_2 = fluid.layers.nn.uniform_random( @@ -389,7 
+391,7 @@ def check_with_place(self, place): out = scope.var("X").get_selected_rows() shape_tensor = scope.var("Shape").get_tensor() shape_tensor.set(np.array([100, 784]).astype("int64"), place) - + paddle.manual_seed(10) op = Operator( "uniform_random", ShapeTensor="Shape", @@ -423,7 +425,7 @@ def check_with_place(self, place): shape_1.set(np.array([100]).astype("int64"), place) shape_2 = scope.var("shape2").get_tensor() shape_2.set(np.array([784]).astype("int64"), place) - + paddle.manual_seed(10) op = Operator( "uniform_random", ShapeTensorList=["shape1", "shape2"], From 1cded665059719d9b0e0b49b1a3124927e2bf345 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Mon, 24 Aug 2020 10:37:08 +0800 Subject: [PATCH 15/28] fix test_jit_save_load --- .../fluid/tests/unittests/test_jit_save_load.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py index 2b79659b9c695..6421631f9e88e 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py @@ -80,7 +80,7 @@ def forward(self, x): def train(layer, input_size=784, label_size=1): # create optimizer - adam = fluid.optimizer.SGDOptimizer( + sgd = fluid.optimizer.SGDOptimizer( learning_rate=0.01, parameter_list=layer.parameters()) # create data loader train_loader = fluid.io.DataLoader.from_generator(capacity=5) @@ -97,7 +97,7 @@ def train(layer, input_size=784, label_size=1): avg_loss = fluid.layers.mean(loss) avg_loss.backward() - adam.minimize(avg_loss) + sgd.minimize(avg_loss) layer.clear_gradients() return [img], layer, avg_loss @@ -108,7 +108,7 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) def train_and_save_model(self, model_path=None, configs=None): layer = LinearNet(784, 1) @@ -149,8 +149,8 @@ 
def load_and_finetune(self, train_layer, load_train_layer): train_layer.train() load_train_layer.train() # train & compare - _, _, train_loss = train(train_layer) - _, _, load_train_loss = train(load_train_layer) + img0, _, train_loss = train(train_layer) + img1, _, load_train_loss = train(load_train_layer) self.assertTrue( np.array_equal(train_loss.numpy(), load_train_loss.numpy())) @@ -293,7 +293,7 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) def basic_save_load(self, layer, model_path, configs): # 1. train & save @@ -385,7 +385,7 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) # train and save base model self.train_and_save_orig_model() @@ -426,7 +426,7 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) def train_and_save(self): train_layer = LinearNetReturnHidden(8, 8) From 665fcd4f3354687848f85d7da593923984f20945 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Mon, 24 Aug 2020 14:37:17 +0800 Subject: [PATCH 16/28] merge develop --- .../operators/distributed/large_scale_kv.h | 13 ++- paddle/fluid/operators/dropout_op.h | 4 +- paddle/fluid/operators/gaussian_random_op.cc | 4 +- paddle/fluid/operators/math/sampler.cc | 22 ++--- .../mkldnn/gaussian_random_mkldnn_op.cc | 5 +- paddle/fluid/operators/randint_op.cc | 4 +- paddle/fluid/operators/randperm_op.h | 5 +- paddle/fluid/operators/sampling_id_op.h | 4 +- .../operators/truncated_gaussian_random_op.cc | 5 +- .../fluid/tests/unittests/test_random_seed.py | 86 ++++++------------- python/paddle/framework/random.py | 3 +- 11 files changed, 63 insertions(+), 92 deletions(-) diff --git a/paddle/fluid/operators/distributed/large_scale_kv.h b/paddle/fluid/operators/distributed/large_scale_kv.h index 
0d7032e286caa..44e49240a5434 100644 --- a/paddle/fluid/operators/distributed/large_scale_kv.h +++ b/paddle/fluid/operators/distributed/large_scale_kv.h @@ -14,20 +14,19 @@ #pragma once +#include #include #include #include // NOLINT #include #include +#include // NOLINT #include #include #include #include -#include // NOLINT - -#include #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/rw_lock.h" @@ -98,8 +97,8 @@ class UniformInitializer : public Initializer { } float GetValue() override { - return framework::Generator::GetInstance()->is_init_py - ? dist_(framework::Generator::GetInstance()->GetCPUEngine()) + return framework::DefaultCPUGenerator()->GetIsInitPy() + ? dist_(framework::DefaultCPUGenerator()->GetCPUEngine()) : dist_(random_engine_); // return dist_(random_engine_); } @@ -148,8 +147,8 @@ class GaussianInitializer : public Initializer { } float GetValue() override { - return framework::Generator::GetInstance()->is_init_py - ? dist_(framework::Generator::GetInstance()->GetCPUEngine()) + return framework::DefaultCPUGenerator()->GetIsInitPy() + ? dist_(framework::DefaultCPUGenerator()->GetCPUEngine()) : dist_(random_engine_); // return dist_(random_engine_); } diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index bce4c7ca19a60..bdb4fc4ddaef9 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -56,7 +56,7 @@ class CPUDropoutKernel : public framework::OpKernel { return; } - bool init_generator_py = framework::Generator::GetInstance()->is_init_py; + bool init_generator_py = framework::DefaultCPUGenerator()->GetIsInitPy(); // NOTE: fixed seed should only be used in unittest or for debug. // Guarantee to use random seed in training. @@ -76,7 +76,7 @@ class CPUDropoutKernel : public framework::OpKernel { for (size_t i = 0; i < size; ++i) { float cur_random = init_generator_py - ? 
dist(framework::Generator::GetInstance()->GetCPUEngine()) + ? dist(framework::DefaultCPUGenerator()->GetCPUEngine()) : dist(engine); if (cur_random < dropout_prob) { mask_data[i] = 0; diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 111d4ad449007..cbadbbd09b1fe 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -40,9 +40,9 @@ class CPUGaussianRandomKernel : public framework::OpKernel { int64_t size = tensor->numel(); T* data = tensor->mutable_data(context.GetPlace()); - if (framework::Generator::GetInstance()->is_init_py) { + if (framework::DefaultCPUGenerator()->GetIsInitPy()) { std::mt19937_64& gen_engine = - framework::Generator::GetInstance()->GetCPUEngine(); + framework::DefaultCPUGenerator()->GetCPUEngine(); for (int64_t i = 0; i < size; ++i) { data[i] = dist(gen_engine); } diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc index 86feaa72d5fa6..dc147d7b17bbe 100644 --- a/paddle/fluid/operators/math/sampler.cc +++ b/paddle/fluid/operators/math/sampler.cc @@ -13,11 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/sampler.h" + #include + #include #include #include #include + #include "paddle/fluid/framework/generator.h" namespace paddle { @@ -33,8 +36,8 @@ UniformSampler::UniformSampler(int64_t range, unsigned int seed) } int64_t UniformSampler::Sample() const { - return framework::Generator::GetInstance()->is_init_py - ? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine()) + return framework::DefaultCPUGenerator()->GetIsInitPy() + ? 
(*dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) : (*dist_)(*random_engine_); // return (*dist_)(*random_engine_); } @@ -53,8 +56,8 @@ int64_t LogUniformSampler::Sample() const { // More details: // https://wanghaoshuang.github.io/2017/11/Log-uniform-distribution-sampler/ auto cur_random = - framework::Generator::GetInstance()->is_init_py - ? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine()) + framework::DefaultCPUGenerator()->GetIsInitPy() + ? (*dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) : (*dist_)(*random_engine_); const int64_t value = static_cast(exp(cur_random * log_range_)) - 1; // Mathematically, value should be <= range_, but might not be due to some @@ -85,13 +88,12 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities, int64_t CustomSampler::Sample() const { auto index = - framework::Generator::GetInstance()->is_init_py - ? (*int_dist_)(framework::Generator::GetInstance()->GetCPUEngine()) + framework::DefaultCPUGenerator()->GetIsInitPy() + ? (*int_dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) : (*int_dist_)(*random_engine_); - auto p = - framework::Generator::GetInstance()->is_init_py - ? (*real_dist_)(framework::Generator::GetInstance()->GetCPUEngine()) - : (*real_dist_)(*random_engine_); + auto p = framework::DefaultCPUGenerator()->GetIsInitPy() + ? (*real_dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) + : (*real_dist_)(*random_engine_); if (p > alias_probs_[index]) { int alias = alias_[index]; diff --git a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc index d0ecca78ae8b2..8386d16de7f34 100644 --- a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include + #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/operators/fill_constant_op.h" #include "paddle/fluid/operators/mean_op.h" @@ -36,9 +37,9 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel { int64_t size = tensor->numel(); std::normal_distribution dist(mean, std); - if (framework::Generator::GetInstance()->is_init_py) { + if (framework::DefaultCPUGenerator()->is_init_py) { std::mt19937_64& gen_engine = - framework::Generator::GetInstance()->GetCPUEngine(); + framework::DefaultCPUGenerator()->GetCPUEngine(); for (int64_t i = 0; i < size; ++i) { data[i] = dist(gen_engine); } diff --git a/paddle/fluid/operators/randint_op.cc b/paddle/fluid/operators/randint_op.cc index 662fe3bcb3b3b..e3ec8b45edfe2 100644 --- a/paddle/fluid/operators/randint_op.cc +++ b/paddle/fluid/operators/randint_op.cc @@ -47,9 +47,9 @@ class CPURandintKernel : public framework::OpKernel { std::uniform_int_distribution dist(ctx.Attr("low"), ctx.Attr("high") - 1); - if (framework::Generator::GetInstance()->is_init_py) { + if (framework::DefaultCPUGenerator()->GetIsInitPy()) { std::mt19937_64& gen_engine = - framework::Generator::GetInstance()->GetCPUEngine(); + framework::DefaultCPUGenerator()->GetCPUEngine(); for (int64_t i = 0; i < size; ++i) data[i] = dist(gen_engine); } else { unsigned int seed = static_cast(ctx.Attr("seed")); diff --git a/paddle/fluid/operators/randperm_op.h b/paddle/fluid/operators/randperm_op.h index 0eb028ad80684..001452c98c0d8 100644 --- a/paddle/fluid/operators/randperm_op.h +++ b/paddle/fluid/operators/randperm_op.h @@ -19,6 +19,7 @@ limitations under the License. 
*/ #include #include #include + #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/tensor_util.h" @@ -32,9 +33,9 @@ static inline void random_permate(T* data_ptr, int num, unsigned int seed) { for (int i = 0; i < num; ++i) { data_ptr[i] = static_cast(i); } - if (framework::Generator::GetInstance()->is_init_py) { + if (framework::DefaultCPUGenerator()->GetIsInitPy()) { std::shuffle(data_ptr, data_ptr + num, - framework::Generator::GetInstance()->GetCPUEngine()); + framework::DefaultCPUGenerator()->GetCPUEngine()); } else { if (seed == 0) { diff --git a/paddle/fluid/operators/sampling_id_op.h b/paddle/fluid/operators/sampling_id_op.h index a09220b1ccd13..4cdcf88158d37 100644 --- a/paddle/fluid/operators/sampling_id_op.h +++ b/paddle/fluid/operators/sampling_id_op.h @@ -62,8 +62,8 @@ class SamplingIdKernel : public framework::OpKernel { std::vector ids(batch_size); for (int i = 0; i < batch_size; ++i) { - T r = framework::Generator::GetInstance()->is_init_py - ? dist(framework::Generator::GetInstance()->GetCPUEngine()) + T r = framework::DefaultCPUGenerator()->GetIsInitPy() + ? dist(framework::DefaultCPUGenerator()->GetCPUEngine()) : dist(engine); int idx = width - 1; for (int j = 0; j < width; ++j) { diff --git a/paddle/fluid/operators/truncated_gaussian_random_op.cc b/paddle/fluid/operators/truncated_gaussian_random_op.cc index 3aa9ff544af63..5b7ffe43c9eaf 100644 --- a/paddle/fluid/operators/truncated_gaussian_random_op.cc +++ b/paddle/fluid/operators/truncated_gaussian_random_op.cc @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include #include + #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/framework/op_registry.h" @@ -167,9 +168,9 @@ class CPUTruncatedGaussianRandomKernel : public framework::OpKernel { TruncatedNormal truncated_normal(mean, std); int64_t size = tensor->numel(); - if (framework::Generator::GetInstance()->is_init_py) { + if (framework::DefaultCPUGenerator()->GetIsInitPy()) { std::mt19937_64& gen_engine = - framework::Generator::GetInstance()->GetCPUEngine(); + framework::DefaultCPUGenerator()->GetCPUEngine(); for (int64_t i = 0; i < size; ++i) { data[i] = truncated_normal(dist(gen_engine)); } diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index 1310ef01f04bd..c024bce049f64 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -36,21 +36,21 @@ def test_generator_uniform_random_dygraph(self): fluid.enable_dygraph() gen = paddle.manual_seed(12312321111) - print(gen.initial_seed()) x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0) + st1 = gen.get_state() - print(st1) x1 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - print(gen.initial_seed()) + gen.set_state(st1) print(gen.get_state()) x2 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - print(gen.initial_seed()) + paddle.manual_seed(12312321111) x3 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) + x_np = x.numpy() x1_np = x1.numpy() x2_np = x2.numpy() @@ -61,11 +61,9 @@ def test_generator_uniform_random_dygraph(self): self.assertTrue(np.allclose(x_np, x3_np)) def test_generator_uniform_random_static(self): - fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -97,11 +95,9 @@ def 
test_generator_uniform_random_static(self): self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_gen_dropout_dygraph(self): - gen = generator.Generator() - fluid.enable_dygraph() - gen.manual_seed(111111111) + gen = paddle.manual_seed(111111111) st = gen.get_state() # x = np.arange(1,101).reshape(2,50).astype("float32") x = fluid.layers.uniform_random( @@ -114,8 +110,7 @@ def test_gen_dropout_dygraph(self): y1 = fluid.layers.dropout(x1, 0.5) y_np = y.numpy() y1_np = y1.numpy() - #print(y_np) - #print(y1_np) + if not core.is_compiled_with_cuda(): print(">>>>>>> dropout dygraph >>>>>>>") self.assertTrue(np.allclose(y_np, y1_np)) @@ -123,8 +118,7 @@ def test_gen_dropout_dygraph(self): def test_gen_dropout_static(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -141,19 +135,16 @@ def test_gen_dropout_static(self): out2 = exe.run(train_program, feed={}, fetch_list=[y_1]) out1_np = np.array(out1[0]) out2_np = np.array(out2[0]) - # print(out1_np) - # print(out2_np) + if not core.is_compiled_with_cuda(): print(">>>>>>> dropout static >>>>>>>") self.assertTrue(np.allclose(out1_np, out2_np)) def test_generator_gaussian_random_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() - fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = fluid.layers.gaussian_random([10], dtype="float32") st1 = gen.get_state() x1 = fluid.layers.gaussian_random([10], dtype="float32") @@ -172,11 +163,9 @@ def test_generator_gaussian_random_dygraph(self): self.assertTrue(np.allclose(x_np, x3_np)) def test_generator_gaussian_random_static(self): - fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -214,7 +203,7 @@ def 
test_generator_randint_dygraph(self): fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = paddle.randint(low=10, shape=[10], dtype="int32") st1 = gen.get_state() x1 = paddle.randint(low=10, shape=[10], dtype="int32") @@ -233,11 +222,9 @@ def test_generator_randint_dygraph(self): self.assertTrue(np.allclose(x_np, x3_np)) def test_generator_uniform_random_static(self): - fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -270,11 +257,9 @@ def test_generator_uniform_random_static(self): def test_generator_randint_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() - fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = paddle.randint(low=1) st1 = gen.get_state() x1 = paddle.randint(low=1) @@ -290,13 +275,10 @@ def test_generator_randint_dygraph(self): self.assertTrue(np.allclose(x1_np, x2_np)) self.assertTrue(np.allclose(x_np, x3_np)) - def test_generator_ranint_static(self): - fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -330,11 +312,10 @@ def test_generator_ranint_static(self): def test_generator_randperm_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = paddle.randperm(10) st1 = gen.get_state() x1 = paddle.randperm(10) @@ -347,9 +328,6 @@ def test_generator_randperm_dygraph(self): x2_np = x2.numpy() x3_np = x3.numpy() - # print("## {}".format(x1_np)) - # print("## {}".format(x2_np)) - if not core.is_compiled_with_cuda(): print(">>>>>>> randperm dygraph >>>>>>>") self.assertTrue(np.allclose(x1_np, x2_np)) @@ -359,8 +337,7 @@ def 
test_generator_randperm_static(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -375,8 +352,8 @@ def test_generator_randperm_static(self): out1 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) - gen.manual_seed(123123143) + + paddle.manual_seed(123123143) out2 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) @@ -394,7 +371,7 @@ def test_generator_randperm_static(self): def test_generator_sampling_id_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() + gen = paddle.manual_seed(12312321111) fluid.enable_dygraph() @@ -402,14 +379,17 @@ def test_generator_sampling_id_dygraph(self): x = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) y = fluid.layers.sampling_id(x) + st1 = gen.get_state() x1 = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) y1 = fluid.layers.sampling_id(x) + gen.set_state(st1) x2 = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) y2 = fluid.layers.sampling_id(x) + gen.manual_seed(12312321111) x3 = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) @@ -420,9 +400,6 @@ def test_generator_sampling_id_dygraph(self): x2_np = y2.numpy() x3_np = y3.numpy() - print("## {}".format(x1_np)) - print("## {}".format(x2_np)) - if not core.is_compiled_with_cuda(): print(">>>>>>> sampling id dygraph >>>>>>>") self.assertTrue(np.allclose(x1_np, x2_np)) @@ -432,8 +409,7 @@ def test_generator_randperm_static(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -449,8 +425,8 @@ def test_generator_randperm_static(self): out1 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) 
- gen.manual_seed(123123143) + + paddle.manual_seed(123123143) out2 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) @@ -469,8 +445,7 @@ def test_generator_randperm_static(self): def test_gen_TruncatedNormal_initializer(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) cur_state = gen.get_state() startup_program = fluid.Program() @@ -495,9 +470,7 @@ def test_gen_TruncatedNormal_initializer(self): out1 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) - #gen.set_state(cur_state) gen.manual_seed(123123143) with fluid.program_guard(train_program, startup_program): exe.run(startup_program) @@ -510,11 +483,6 @@ def test_gen_TruncatedNormal_initializer(self): out2_res1 = np.array(out2[0]) out2_res2 = np.array(out2[1]) - print(out1_res1) - print(out1_res2) - print(out2_res1) - print(out2_res2) - if not core.is_compiled_with_cuda(): print(">>>>>>> sampling id static >>>>>>>") self.assertTrue(np.allclose(out1_res1, out2_res1)) diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index c8bb4eb7da5ab..886daecfc99b9 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -47,6 +47,5 @@ def manual_seed(seed): program = fluid.Program() program.global_seed(seed) - core.default_cpu_generator()._is_init_py = False - print(core.default_cpu_generator()._is_init_py) + core.default_cpu_generator()._is_init_py = True return core.default_cpu_generator().manual_seed(seed) From a3310e03e91f20a3ae152f8407cfc32dd9eaab96 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Mon, 24 Aug 2020 18:02:13 +0800 Subject: [PATCH 17/28] fix manual_seed --- .../unittests/test_dygraph_multi_forward.py | 12 ++--- .../fluid/tests/unittests/test_fc_op.py | 49 ++++++++++--------- .../unittests/test_gaussian_random_op.py | 3 +- python/paddle/framework/random.py | 2 +- 4 files changed, 34 insertions(+), 32 
deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py index ae4355ec412c8..3604e844df154 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py @@ -27,6 +27,8 @@ from paddle.fluid.dygraph.base import to_variable from test_imperative_base import new_program_scope +SEED = 123123111 + class SimpleImgConvPool(fluid.dygraph.Layer): def __init__(self, @@ -105,12 +107,10 @@ def forward(self, inputs): class TestDygraphMultiForward(unittest.TestCase): def test_mnist_forward_float32(self): - seed = 90 epoch_num = 1 - with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + with fluid.dygraph.guard(): + paddle.manual_seed(SEED) mnist = MNIST() sgd = SGDOptimizer( learning_rate=1e-3, parameter_list=mnist.parameters()) @@ -142,9 +142,7 @@ def test_mnist_forward_float32(self): dy_param_init_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed - + paddle.manual_seed(SEED) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py index a0514b62e53f6..ec30cb70c5790 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -136,29 +136,32 @@ def config(self): class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): def test_api(self): - paddle.manual_seed(SEED) - startup_program = Program() - main_program = Program() - - with program_guard(main_program, startup_program): - input = np.random.random([2, 2, 25]).astype("float32") - x = fluid.layers.data( - name="x", - shape=[2, 2, 25], - 
append_batch_size=False, - dtype="float32") - - out_1 = fluid.layers.fc(input=x, size=1, num_flatten_dims=-1) - out_2 = fluid.layers.fc(input=x, size=1, num_flatten_dims=2) - - place = fluid.CPUPlace() if not core.is_compiled_with_cuda( - ) else fluid.CUDAPlace(0) - exe = fluid.Executor(place=place) - exe.run(startup_program) - res_1, res_2 = exe.run(main_program, - feed={"x": input}, - fetch_list=[out_1, out_2]) - assert np.array_equal(res_1, res_2) + def run_program(num_flatten_dims): + paddle.manual_seed(SEED) + startup_program = Program() + main_program = Program() + + with program_guard(main_program, startup_program): + input = np.random.random([2, 2, 25]).astype("float32") + x = fluid.layers.data( + name="x", + shape=[2, 2, 25], + append_batch_size=False, + dtype="float32") + + out = fluid.layers.fc(input=x, + size=1, + num_flatten_dims=num_flatten_dims) + + place = fluid.CPUPlace() if not core.is_compiled_with_cuda( + ) else fluid.CUDAPlace(0) + exe = fluid.Executor(place=place) + exe.run(startup_program) + out = exe.run(main_program, feed={"x": input}, fetch_list=[out]) + + res_1 = run_program(-1) + res_2 = run_program(2) + self.assertTrue(np.array_equal(res_1, res_2)) class TestFCOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index 6b08c4250f61c..9ab8440407390 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np - +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.op import Operator @@ -37,6 +37,7 @@ def setUp(self): "seed": 10, "use_mkldnn": self.use_mkldnn } + paddle.manual_seed(10) self.outputs = {'Out': np.zeros((123, 92), dtype='float32')} diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 
886daecfc99b9..69dd02cecadf1 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -47,5 +47,5 @@ def manual_seed(seed): program = fluid.Program() program.global_seed(seed) - core.default_cpu_generator()._is_init_py = True + core.default_cpu_generator()._is_init_py = False return core.default_cpu_generator().manual_seed(seed) From 464f8c44c77a7f35496b9b9684d00badba754f05 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Tue, 25 Aug 2020 10:50:15 +0800 Subject: [PATCH 18/28] fix manual_seed --- python/paddle/framework/random.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 69dd02cecadf1..886daecfc99b9 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -47,5 +47,5 @@ def manual_seed(seed): program = fluid.Program() program.global_seed(seed) - core.default_cpu_generator()._is_init_py = False + core.default_cpu_generator()._is_init_py = True return core.default_cpu_generator().manual_seed(seed) From c3e343530fb1063d9aad502c11bacc685b04bd5b Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Wed, 26 Aug 2020 11:13:45 +0800 Subject: [PATCH 19/28] use global engine --- paddle/fluid/framework/generator.cc | 27 ++++++++++++++ paddle/fluid/framework/generator.h | 6 ++++ .../operators/distributed/large_scale_kv.h | 29 ++++----------- paddle/fluid/operators/dropout_op.h | 14 ++------ paddle/fluid/operators/gaussian_random_op.cc | 22 +++--------- paddle/fluid/operators/math/sampler.cc | 36 +++++++------------ paddle/fluid/operators/math/sampler.h | 18 +++++----- .../mkldnn/gaussian_random_mkldnn_op.cc | 20 +++-------- paddle/fluid/operators/randint_op.cc | 19 +++------- paddle/fluid/operators/randperm_op.h | 12 ++----- paddle/fluid/operators/sampling_id_op.h | 11 ++---- .../operators/truncated_gaussian_random_op.cc | 20 +++-------- paddle/fluid/operators/uniform_random_op.cc | 33 ++++++----------- 
.../tests/unittests/test_uniform_random_op.py | 3 +- 14 files changed, 98 insertions(+), 172 deletions(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index 6c84324474963..bd32e6beaaeee 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -31,6 +31,33 @@ const std::shared_ptr& DefaultCPUGenerator() { return default_cpu_generator; } +const std::shared_ptr& OpDefaultCPUEngine() { + static auto op_default_cpu_engine = std::make_shared(); + return op_default_cpu_engine; +} + +// NOTE(zhiqiu): there are 3 conditions: +// (1) op seed is not set and DefaultCPUGenerator is inited, use +// DefaultCPUGenerator +// (2) op seed is not set and DefaultCPUGenerator is not inited, use se +// OpDefaultCPUEngine() and set a radnom seed +// (3) op seed is set, use OpDefaultCPUEngine() and set the seed +const std::mt19937_64& GetCPURandomEngine(uint64_t seed) { + if (DefaultCPUGenerator()->GetIsInitPy() && seed == 0) { + return DefaultCPUGenerator()->GetCPUEngine(); + } else { + if (seed == 0) { + seed = GetRandomSeed(); + } + static std::mutex mu_; + { + std::lock_guard lock(mu_); + OpDefaultCPUEngine()->seed(seed); + } + return *OpDefaultCPUEngine(); + } +} + GeneratorState* Generator::GetState() { std::lock_guard lock(this->mutex); return this->state_.get(); diff --git a/paddle/fluid/framework/generator.h b/paddle/fluid/framework/generator.h index b452cac77f456..5527c33092b99 100644 --- a/paddle/fluid/framework/generator.h +++ b/paddle/fluid/framework/generator.h @@ -95,7 +95,13 @@ struct Generator { bool is_init_py_ = false; }; +// The DefaultCPUGenerator is used in manual_seed() const std::shared_ptr& DefaultCPUGenerator(); +// If op seed is set or global is not set, the OpDefaultCPUEngine is used. 
+const std::shared_ptr& OpDefaultCPUEngine(); + +const std::mt19937_64& GetCPURandomEngine(uint64_t); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/operators/distributed/large_scale_kv.h b/paddle/fluid/operators/distributed/large_scale_kv.h index 44e49240a5434..ee35799c5d07c 100644 --- a/paddle/fluid/operators/distributed/large_scale_kv.h +++ b/paddle/fluid/operators/distributed/large_scale_kv.h @@ -88,26 +88,17 @@ class UniformInitializer : public Initializer { min_ = std::stof(attrs[2]); max_ = std::stof(attrs[3]); - if (seed_ == 0) { - seed_ = std::random_device()(); - } - - random_engine_.seed(seed_); dist_ = std::uniform_real_distribution(min_, max_); + random_engine_ = framework::GetCPURandomEngine(seed_); } - float GetValue() override { - return framework::DefaultCPUGenerator()->GetIsInitPy() - ? dist_(framework::DefaultCPUGenerator()->GetCPUEngine()) - : dist_(random_engine_); - // return dist_(random_engine_); - } + float GetValue() override { return dist_(random_engine_); } private: float min_; float max_; - std::minstd_rand random_engine_; + std::mt19937_64 random_engine_; std::uniform_real_distribution dist_; }; @@ -138,26 +129,18 @@ class GaussianInitializer : public Initializer { mean_ = std::stof(attrs[2]); std_ = std::stof(attrs[3]); - if (seed_ == 0) { - seed_ = std::random_device()(); - } + random_engine_ = framework::GetCPURandomEngine(seed_); - random_engine_.seed(seed_); dist_ = std::normal_distribution(mean_, std_); } - float GetValue() override { - return framework::DefaultCPUGenerator()->GetIsInitPy() - ? 
dist_(framework::DefaultCPUGenerator()->GetCPUEngine()) - : dist_(random_engine_); - // return dist_(random_engine_); - } + float GetValue() override { return dist_(random_engine_); } private: float std_; float mean_; - std::minstd_rand random_engine_; + std::mt19937_64 random_engine_; std::normal_distribution dist_; }; diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index bdb4fc4ddaef9..915759d1651d1 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -56,29 +56,21 @@ class CPUDropoutKernel : public framework::OpKernel { return; } - bool init_generator_py = framework::DefaultCPUGenerator()->GetIsInitPy(); - // NOTE: fixed seed should only be used in unittest or for debug. // Guarantee to use random seed in training. - std::random_device rnd; - std::minstd_rand engine; int seed_data; if (seed) { seed_data = *(seed->data()); } else { - seed_data = - context.Attr("fix_seed") ? context.Attr("seed") : rnd(); + seed_data = context.Attr("seed"); } + auto engine = framework::GetCPURandomEngine(seed_data); engine.seed(seed_data); std::uniform_real_distribution dist(0, 1); for (size_t i = 0; i < size; ++i) { - float cur_random = - init_generator_py - ? 
dist(framework::DefaultCPUGenerator()->GetCPUEngine()) - : dist(engine); - if (cur_random < dropout_prob) { + if (dist(engine) < dropout_prob) { mask_data[i] = 0; y_data[i] = 0; } else { diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index cbadbbd09b1fe..6db920b4af7c0 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -39,26 +39,14 @@ class CPUGaussianRandomKernel : public framework::OpKernel { tensor->Resize(shape); int64_t size = tensor->numel(); T* data = tensor->mutable_data(context.GetPlace()); + unsigned int seed = static_cast(context.Attr("seed")); + auto engine = framework::GetCPURandomEngine(seed); - if (framework::DefaultCPUGenerator()->GetIsInitPy()) { - std::mt19937_64& gen_engine = - framework::DefaultCPUGenerator()->GetCPUEngine(); - for (int64_t i = 0; i < size; ++i) { - data[i] = dist(gen_engine); - } - } else { - unsigned int seed = static_cast(context.Attr("seed")); - std::minstd_rand engine; - if (seed == 0) { - seed = std::random_device()(); - } - engine.seed(seed); - for (int64_t i = 0; i < size; ++i) { - data[i] = dist(engine); - } + for (int64_t i = 0; i < size; ++i) { + data[i] = dist(engine); } } -}; +}; // namespace operators template class CPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel { diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc index dc147d7b17bbe..3ce642e13a4af 100644 --- a/paddle/fluid/operators/math/sampler.cc +++ b/paddle/fluid/operators/math/sampler.cc @@ -31,23 +31,18 @@ Sampler::~Sampler() {} UniformSampler::UniformSampler(int64_t range, unsigned int seed) : Sampler(range, seed), inv_range_(1.0 / (range + 1)) { - random_engine_ = std::make_shared(seed_); - dist_ = std::make_shared>(0, range); + random_engine_ = framework::GetCPURandomEngine(seed_); + dist_ = std::make_shared>(0, range); } -int64_t UniformSampler::Sample() const { - return 
framework::DefaultCPUGenerator()->GetIsInitPy() - ? (*dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) - : (*dist_)(*random_engine_); - // return (*dist_)(*random_engine_); -} +int64_t UniformSampler::Sample() const { return (*dist_)(random_engine_); } float UniformSampler::Probability(int64_t value) const { return inv_range_; } LogUniformSampler::LogUniformSampler(int64_t range, unsigned int seed) : Sampler(range, seed), log_range_(log(range + 1)) { - random_engine_ = std::make_shared(seed_); - dist_ = std::make_shared>(0, 1); + random_engine_ = framework::GetCPURandomEngine(seed_); + dist_ = std::make_shared>(0, 1); } int64_t LogUniformSampler::Sample() const { @@ -55,10 +50,7 @@ int64_t LogUniformSampler::Sample() const { // inverse_transform_sampling method // More details: // https://wanghaoshuang.github.io/2017/11/Log-uniform-distribution-sampler/ - auto cur_random = - framework::DefaultCPUGenerator()->GetIsInitPy() - ? (*dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) - : (*dist_)(*random_engine_); + auto cur_random = (*dist_)(random_engine_); const int64_t value = static_cast(exp(cur_random * log_range_)) - 1; // Mathematically, value should be <= range_, but might not be due to some // floating point roundoff, so we mod by range_. @@ -77,9 +69,10 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities, const int *alias, const float *alias_probabilities, unsigned int seed) : Sampler(range, seed) { - random_engine_ = std::make_shared(seed_); - real_dist_ = std::make_shared>(0, 1); - int_dist_ = std::make_shared>(0, range); + random_engine_ = framework::GetCPURandomEngine(seed_); + real_dist_ = std::make_shared>(0, 1); + int_dist_ = + std::make_shared>(0, range); alias_probs_ = alias_probabilities; probs_ = probabilities; @@ -87,13 +80,8 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities, } int64_t CustomSampler::Sample() const { - auto index = - framework::DefaultCPUGenerator()->GetIsInitPy() - ? 
(*int_dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) - : (*int_dist_)(*random_engine_); - auto p = framework::DefaultCPUGenerator()->GetIsInitPy() - ? (*real_dist_)(framework::DefaultCPUGenerator()->GetCPUEngine()) - : (*real_dist_)(*random_engine_); + auto index = (*int_dist_)(random_engine_); + auto p = (*real_dist_)(random_engine_); if (p > alias_probs_[index]) { int alias = alias_[index]; diff --git a/paddle/fluid/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h index 3fa5a7ae336a9..11e2bf521c5ff 100644 --- a/paddle/fluid/operators/math/sampler.h +++ b/paddle/fluid/operators/math/sampler.h @@ -26,8 +26,8 @@ namespace math { // TODO(wanghaoshuang): Support for GPU /** -* Sample integers from [0, range). -*/ + * Sample integers from [0, range). + */ class Sampler { public: explicit Sampler(int64_t range, unsigned int seed = 0UL) : range_(range) { @@ -72,8 +72,8 @@ class UniformSampler : public Sampler { private: const float inv_range_; - std::shared_ptr random_engine_; - std::shared_ptr> dist_; + std::mt19937_64 random_engine_; + std::shared_ptr> dist_; }; /** @@ -93,8 +93,8 @@ class LogUniformSampler : public Sampler { private: const float log_range_; - std::shared_ptr random_engine_; - std::shared_ptr> dist_; + std::mt19937_64 random_engine_; + std::shared_ptr> dist_; }; /** @@ -117,9 +117,9 @@ class CustomSampler : public Sampler { const int* alias_; const float* probs_; const int exceptional_val = -1; - std::shared_ptr random_engine_; - std::shared_ptr> real_dist_; - std::shared_ptr> int_dist_; + std::mt19937_64 random_engine_; + std::shared_ptr> real_dist_; + std::shared_ptr> int_dist_; }; } // namespace math diff --git a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc index 8386d16de7f34..f4037fb3107cc 100644 --- a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc @@ -36,23 +36,11 @@ 
class GaussianMKLDNNKernel : public paddle::framework::OpKernel { T* data = tensor->mutable_data(context.GetPlace()); int64_t size = tensor->numel(); std::normal_distribution dist(mean, std); + unsigned int seed = static_cast(context.Attr("seed")); + auto engine = framework::GetCPURandomEngine(seed); - if (framework::DefaultCPUGenerator()->is_init_py) { - std::mt19937_64& gen_engine = - framework::DefaultCPUGenerator()->GetCPUEngine(); - for (int64_t i = 0; i < size; ++i) { - data[i] = dist(gen_engine); - } - } else { - unsigned int seed = static_cast(context.Attr("seed")); - std::minstd_rand engine; - if (seed == 0) { - seed = std::random_device()(); - } - engine.seed(seed); - for (int64_t i = 0; i < size; ++i) { - data[i] = dist(engine); - } + for (int64_t i = 0; i < size; ++i) { + data[i] = dist(gen_engine); } tensor->set_layout(DataLayout::kMKLDNN); diff --git a/paddle/fluid/operators/randint_op.cc b/paddle/fluid/operators/randint_op.cc index e3ec8b45edfe2..de9ab7f62938e 100644 --- a/paddle/fluid/operators/randint_op.cc +++ b/paddle/fluid/operators/randint_op.cc @@ -46,22 +46,11 @@ class CPURandintKernel : public framework::OpKernel { std::uniform_int_distribution dist(ctx.Attr("low"), ctx.Attr("high") - 1); + unsigned int seed = static_cast(ctx.Attr("seed")); + auto engine = framework::GetCPURandomEngine(seed); - if (framework::DefaultCPUGenerator()->GetIsInitPy()) { - std::mt19937_64& gen_engine = - framework::DefaultCPUGenerator()->GetCPUEngine(); - for (int64_t i = 0; i < size; ++i) data[i] = dist(gen_engine); - } else { - unsigned int seed = static_cast(ctx.Attr("seed")); - std::minstd_rand engine; - if (seed == 0) { - seed = std::random_device()(); - } - engine.seed(seed); - - for (int64_t i = 0; i < size; ++i) { - data[i] = dist(engine); - } + for (int64_t i = 0; i < size; ++i) { + data[i] = dist(engine); } } }; diff --git a/paddle/fluid/operators/randperm_op.h b/paddle/fluid/operators/randperm_op.h index 001452c98c0d8..0f14bf509b2de 100644 --- 
a/paddle/fluid/operators/randperm_op.h +++ b/paddle/fluid/operators/randperm_op.h @@ -30,20 +30,12 @@ namespace operators { template static inline void random_permate(T* data_ptr, int num, unsigned int seed) { + auto engine = framework::GetCPURandomEngine(seed); for (int i = 0; i < num; ++i) { data_ptr[i] = static_cast(i); } - if (framework::DefaultCPUGenerator()->GetIsInitPy()) { - std::shuffle(data_ptr, data_ptr + num, - framework::DefaultCPUGenerator()->GetCPUEngine()); - } else { - if (seed == 0) { - seed = std::random_device()(); - } - std::srand(seed); - std::random_shuffle(data_ptr, data_ptr + num); - } + std::shuffle(data_ptr, data_ptr + num, engine); } template diff --git a/paddle/fluid/operators/sampling_id_op.h b/paddle/fluid/operators/sampling_id_op.h index 4cdcf88158d37..d8dcbfde1e407 100644 --- a/paddle/fluid/operators/sampling_id_op.h +++ b/paddle/fluid/operators/sampling_id_op.h @@ -51,20 +51,15 @@ class SamplingIdKernel : public framework::OpKernel { framework::TensorToVector(*input, context.device_context(), &ins_vector); unsigned int seed = static_cast(context.Attr("seed")); - std::minstd_rand engine; - if (seed == 0) { - seed = std::random_device()(); - } - engine.seed(seed); + std::uniform_real_distribution dist( static_cast(context.Attr("min")), static_cast(context.Attr("max"))); + auto engine = framework::GetCPURandomEngine(seed); std::vector ids(batch_size); for (int i = 0; i < batch_size; ++i) { - T r = framework::DefaultCPUGenerator()->GetIsInitPy() - ? 
dist(framework::DefaultCPUGenerator()->GetCPUEngine()) - : dist(engine); + T r = dist(engine); int idx = width - 1; for (int j = 0; j < width; ++j) { if ((r -= ins_vector[i * width + j]) < 0) { diff --git a/paddle/fluid/operators/truncated_gaussian_random_op.cc b/paddle/fluid/operators/truncated_gaussian_random_op.cc index 5b7ffe43c9eaf..58e886b7d4e0e 100644 --- a/paddle/fluid/operators/truncated_gaussian_random_op.cc +++ b/paddle/fluid/operators/truncated_gaussian_random_op.cc @@ -168,22 +168,10 @@ class CPUTruncatedGaussianRandomKernel : public framework::OpKernel { TruncatedNormal truncated_normal(mean, std); int64_t size = tensor->numel(); - if (framework::DefaultCPUGenerator()->GetIsInitPy()) { - std::mt19937_64& gen_engine = - framework::DefaultCPUGenerator()->GetCPUEngine(); - for (int64_t i = 0; i < size; ++i) { - data[i] = truncated_normal(dist(gen_engine)); - } - } else { - unsigned int seed = static_cast(context.Attr("seed")); - std::minstd_rand engine; - if (seed == 0) { - seed = std::random_device()(); - } - engine.seed(seed); - for (int64_t i = 0; i < size; ++i) { - data[i] = truncated_normal(dist(engine)); - } + unsigned int seed = static_cast(context.Attr("seed")); + auto engine = framework::GetCPURandomEngine(seed); + for (int64_t i = 0; i < size; ++i) { + data[i] = truncated_normal(dist(engine)); } } }; diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 885c0d57d2d53..be0da433e6268 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -64,22 +64,11 @@ class CPUUniformRandomKernel : public framework::OpKernel { std::uniform_real_distribution dist( static_cast(ctx.Attr("min")), static_cast(ctx.Attr("max"))); - auto gen_ptr = framework::DefaultCPUGenerator(); - if (gen_ptr->GetIsInitPy()) { - std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine(); - for (int64_t i = 0; i < size; ++i) { - data[i] = dist(gen_engine); - } - } else { - 
unsigned int seed = static_cast(ctx.Attr("seed")); - std::minstd_rand engine; - if (seed == 0) { - seed = std::random_device()(); - } - engine.seed(seed); - for (int64_t i = 0; i < size; ++i) { - data[i] = dist(engine); - } + unsigned int seed = static_cast(ctx.Attr("seed")); + auto engine = framework::GetCPURandomEngine(seed); + + for (int64_t i = 0; i < size; ++i) { + data[i] = dist(engine); } unsigned int diag_num = @@ -130,12 +119,12 @@ class UniformRandomOp : public framework::OperatorWithKernel { if (ctx->HasInputs("ShapeTensorList")) { // top prority shape auto inputs_name = ctx->Inputs("ShapeTensorList"); - PADDLE_ENFORCE_GT( - inputs_name.size(), 0, - platform::errors::InvalidArgument( - "Input(ShapeTensorList)'size of Op(uniform_random) can't be zero." - "Please check the Attr(shape)'s size of" - "Op(fluid.layers.uniform_random).)")); + PADDLE_ENFORCE_GT(inputs_name.size(), 0, + platform::errors::InvalidArgument( + "Input(ShapeTensorList)'size of " + "Op(uniform_random) can't be zero." 
+ "Please check the Attr(shape)'s size of" + "Op(fluid.layers.uniform_random).)")); auto out_dims = std::vector(inputs_name.size(), -1); ctx->SetOutputDim("Out", framework::make_ddim(out_dims)); diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index b5ca1468a0c35..a04aaaef0d41b 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -350,7 +350,8 @@ def test_attr_tensor_int32_API(self): class TestUniformRandomOp_API_seed(unittest.TestCase): def test_attr_tensor_API(self): _seed = 10 - paddle.manual_seed(_seed) + gen = paddle.manual_seed(_seed) + gen._is_init_py = False startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): From a2430f5dec040425518c08542b9e76c29ecf76d3 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Wed, 26 Aug 2020 22:56:47 +0800 Subject: [PATCH 20/28] use shared_ptr --- paddle/fluid/framework/generator.cc | 19 ++++++++++--------- paddle/fluid/framework/generator.h | 14 +++++++------- paddle/fluid/operators/bernoulli_op.cc | 4 ++-- .../operators/distributed/large_scale_kv.h | 8 ++++---- paddle/fluid/operators/dropout_op.h | 3 +-- paddle/fluid/operators/gaussian_random_op.cc | 2 +- paddle/fluid/operators/math/sampler.cc | 17 ++++++++--------- paddle/fluid/operators/math/sampler.h | 14 +++++++------- .../mkldnn/gaussian_random_mkldnn_op.cc | 2 +- paddle/fluid/operators/randint_op.cc | 2 +- paddle/fluid/operators/randperm_op.h | 2 +- paddle/fluid/operators/sampling_id_op.h | 2 +- .../operators/truncated_gaussian_random_op.cc | 2 +- paddle/fluid/operators/uniform_random_op.cc | 2 +- 14 files changed, 46 insertions(+), 47 deletions(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index bd32e6beaaeee..b83f15449aba2 100644 --- a/paddle/fluid/framework/generator.cc +++ 
b/paddle/fluid/framework/generator.cc @@ -31,7 +31,7 @@ const std::shared_ptr& DefaultCPUGenerator() { return default_cpu_generator; } -const std::shared_ptr& OpDefaultCPUEngine() { +std::shared_ptr OpDefaultCPUEngine() { static auto op_default_cpu_engine = std::make_shared(); return op_default_cpu_engine; } @@ -42,7 +42,7 @@ const std::shared_ptr& OpDefaultCPUEngine() { // (2) op seed is not set and DefaultCPUGenerator is not inited, use se // OpDefaultCPUEngine() and set a radnom seed // (3) op seed is set, use OpDefaultCPUEngine() and set the seed -const std::mt19937_64& GetCPURandomEngine(uint64_t seed) { +std::shared_ptr GetCPURandomEngine(uint64_t seed) { if (DefaultCPUGenerator()->GetIsInitPy() && seed == 0) { return DefaultCPUGenerator()->GetCPUEngine(); } else { @@ -54,7 +54,7 @@ const std::mt19937_64& GetCPURandomEngine(uint64_t seed) { std::lock_guard lock(mu_); OpDefaultCPUEngine()->seed(seed); } - return *OpDefaultCPUEngine(); + return OpDefaultCPUEngine(); } } @@ -80,7 +80,7 @@ uint64_t Generator::Seed() { seed = ((((uint64_t)de()) << 32) + de()) & 0x1FFFFFFFFFFFFF; this->state_->current_seed = seed; std::seed_seq seq({seed}); - this->state_->cpu_engine.seed(seq); + this->state_->cpu_engine->seed(seq); return this->state_->current_seed; } @@ -89,22 +89,23 @@ void Generator::SetCurrentSeed(uint64_t seed) { std::lock_guard lock(this->mutex); this->state_->current_seed = uint64_t(seed); std::seed_seq seq({seed}); - this->state_->cpu_engine.seed(seq); + this->state_->cpu_engine->seed(seq); } -std::mt19937_64& Generator::GetCPUEngine() { +std::shared_ptr Generator::GetCPUEngine() { std::lock_guard lock(this->mutex); return this->state_->cpu_engine; } -void Generator::SetCPUEngine(std::mt19937_64 engine) { +void Generator::SetCPUEngine(std::shared_ptr engine) { std::lock_guard lock(this->mutex); - this->state_->cpu_engine = std::mt19937_64(engine); + this->state_->cpu_engine = engine; } uint64_t Generator::Random64() { std::lock_guard lock(this->mutex); - 
return this->state_->cpu_engine(); + auto engine = this->state_->cpu_engine; + return (*engine)(); } void Generator::SetIsInitPy(bool is_init_py) { diff --git a/paddle/fluid/framework/generator.h b/paddle/fluid/framework/generator.h index 5527c33092b99..4d557bb67de02 100644 --- a/paddle/fluid/framework/generator.h +++ b/paddle/fluid/framework/generator.h @@ -37,7 +37,7 @@ static uint64_t GetRandomSeed() { struct GeneratorState { int64_t device = -1; uint64_t current_seed = 34342423252; - std::mt19937_64 cpu_engine; + std::shared_ptr cpu_engine; }; struct Generator { @@ -46,7 +46,7 @@ struct Generator { default_gen_state_cpu.device = -1; default_gen_state_cpu.current_seed = GetRandomSeed(); std::seed_seq seq({default_gen_state_cpu.current_seed}); - default_gen_state_cpu.cpu_engine = std::mt19937_64(seq); + default_gen_state_cpu.cpu_engine = std::make_shared(seq); this->state_ = std::make_shared(default_gen_state_cpu); } explicit Generator(uint64_t seed) { @@ -54,7 +54,7 @@ struct Generator { default_gen_state_cpu.device = -1; default_gen_state_cpu.current_seed = seed; std::seed_seq seq({seed}); - default_gen_state_cpu.cpu_engine = std::mt19937_64(seq); + default_gen_state_cpu.cpu_engine = std::make_shared(seq); this->state_ = std::make_shared(default_gen_state_cpu); this->is_init_py_ = true; // TODO(zhiqiu): remove it in future } @@ -74,9 +74,9 @@ struct Generator { // set seed void SetCurrentSeed(uint64_t seed); // get cpu engine - std::mt19937_64& GetCPUEngine(); + std::shared_ptr GetCPUEngine(); // set cpu engine - void SetCPUEngine(std::mt19937_64 engine); + void SetCPUEngine(std::shared_ptr); uint64_t Random64(); @@ -99,9 +99,9 @@ struct Generator { const std::shared_ptr& DefaultCPUGenerator(); // If op seed is set or global is not set, the OpDefaultCPUEngine is used. 
-const std::shared_ptr& OpDefaultCPUEngine(); +std::shared_ptr OpDefaultCPUEngine(); -const std::mt19937_64& GetCPURandomEngine(uint64_t); +std::shared_ptr GetCPURandomEngine(uint64_t); } // namespace framework } // namespace paddle diff --git a/paddle/fluid/operators/bernoulli_op.cc b/paddle/fluid/operators/bernoulli_op.cc index 0e81f7accb549..79c4e2c2bba31 100644 --- a/paddle/fluid/operators/bernoulli_op.cc +++ b/paddle/fluid/operators/bernoulli_op.cc @@ -65,10 +65,10 @@ class BernoulliOpKernel int64_t size = x->numel(); std::uniform_real_distribution dist(0.0, 1.0); auto gen_ptr = framework::DefaultCPUGenerator(); - std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine(); + auto engine = gen_ptr->GetCPUEngine(); for (int64_t i = 0; i < size; ++i) { - out_data[i] = BernoulliFunctor(in_data[i], dist(gen_engine)); + out_data[i] = BernoulliFunctor(in_data[i], dist(*engine)); } } }; // namespace operators diff --git a/paddle/fluid/operators/distributed/large_scale_kv.h b/paddle/fluid/operators/distributed/large_scale_kv.h index ee35799c5d07c..9e39e68cba779 100644 --- a/paddle/fluid/operators/distributed/large_scale_kv.h +++ b/paddle/fluid/operators/distributed/large_scale_kv.h @@ -92,13 +92,13 @@ class UniformInitializer : public Initializer { random_engine_ = framework::GetCPURandomEngine(seed_); } - float GetValue() override { return dist_(random_engine_); } + float GetValue() override { return dist_(*random_engine_); } private: float min_; float max_; - std::mt19937_64 random_engine_; + std::shared_ptr random_engine_; std::uniform_real_distribution dist_; }; @@ -134,13 +134,13 @@ class GaussianInitializer : public Initializer { dist_ = std::normal_distribution(mean_, std_); } - float GetValue() override { return dist_(random_engine_); } + float GetValue() override { return dist_(*random_engine_); } private: float std_; float mean_; - std::mt19937_64 random_engine_; + std::shared_ptr random_engine_; std::normal_distribution dist_; }; diff --git 
a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index 915759d1651d1..06d4a253a8a28 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -65,12 +65,11 @@ class CPUDropoutKernel : public framework::OpKernel { seed_data = context.Attr("seed"); } auto engine = framework::GetCPURandomEngine(seed_data); - engine.seed(seed_data); std::uniform_real_distribution dist(0, 1); for (size_t i = 0; i < size; ++i) { - if (dist(engine) < dropout_prob) { + if (dist(*engine) < dropout_prob) { mask_data[i] = 0; y_data[i] = 0; } else { diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 6db920b4af7c0..4f128463375b9 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -43,7 +43,7 @@ class CPUGaussianRandomKernel : public framework::OpKernel { auto engine = framework::GetCPURandomEngine(seed); for (int64_t i = 0; i < size; ++i) { - data[i] = dist(engine); + data[i] = dist(*engine); } } }; // namespace operators diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc index 3ce642e13a4af..a4bdc923eecc3 100644 --- a/paddle/fluid/operators/math/sampler.cc +++ b/paddle/fluid/operators/math/sampler.cc @@ -32,17 +32,17 @@ Sampler::~Sampler() {} UniformSampler::UniformSampler(int64_t range, unsigned int seed) : Sampler(range, seed), inv_range_(1.0 / (range + 1)) { random_engine_ = framework::GetCPURandomEngine(seed_); - dist_ = std::make_shared>(0, range); + dist_ = std::make_shared>(0, range); } -int64_t UniformSampler::Sample() const { return (*dist_)(random_engine_); } +int64_t UniformSampler::Sample() const { return (*dist_)(*random_engine_); } float UniformSampler::Probability(int64_t value) const { return inv_range_; } LogUniformSampler::LogUniformSampler(int64_t range, unsigned int seed) : Sampler(range, seed), log_range_(log(range + 1)) { random_engine_ = 
framework::GetCPURandomEngine(seed_); - dist_ = std::make_shared>(0, 1); + dist_ = std::make_shared>(0, 1); } int64_t LogUniformSampler::Sample() const { @@ -50,7 +50,7 @@ int64_t LogUniformSampler::Sample() const { // inverse_transform_sampling method // More details: // https://wanghaoshuang.github.io/2017/11/Log-uniform-distribution-sampler/ - auto cur_random = (*dist_)(random_engine_); + auto cur_random = (*dist_)(*random_engine_); const int64_t value = static_cast(exp(cur_random * log_range_)) - 1; // Mathematically, value should be <= range_, but might not be due to some // floating point roundoff, so we mod by range_. @@ -70,9 +70,8 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities, unsigned int seed) : Sampler(range, seed) { random_engine_ = framework::GetCPURandomEngine(seed_); - real_dist_ = std::make_shared>(0, 1); - int_dist_ = - std::make_shared>(0, range); + real_dist_ = std::make_shared>(0, 1); + int_dist_ = std::make_shared>(0, range); alias_probs_ = alias_probabilities; probs_ = probabilities; @@ -80,8 +79,8 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities, } int64_t CustomSampler::Sample() const { - auto index = (*int_dist_)(random_engine_); - auto p = (*real_dist_)(random_engine_); + auto index = (*int_dist_)(*random_engine_); + auto p = (*real_dist_)(*random_engine_); if (p > alias_probs_[index]) { int alias = alias_[index]; diff --git a/paddle/fluid/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h index 11e2bf521c5ff..480576ef9dc8c 100644 --- a/paddle/fluid/operators/math/sampler.h +++ b/paddle/fluid/operators/math/sampler.h @@ -72,8 +72,8 @@ class UniformSampler : public Sampler { private: const float inv_range_; - std::mt19937_64 random_engine_; - std::shared_ptr> dist_; + std::shared_ptr random_engine_; + std::shared_ptr> dist_; }; /** @@ -93,8 +93,8 @@ class LogUniformSampler : public Sampler { private: const float log_range_; - std::mt19937_64 random_engine_; - 
std::shared_ptr> dist_; + std::shared_ptr random_engine_; + std::shared_ptr> dist_; }; /** @@ -117,9 +117,9 @@ class CustomSampler : public Sampler { const int* alias_; const float* probs_; const int exceptional_val = -1; - std::mt19937_64 random_engine_; - std::shared_ptr> real_dist_; - std::shared_ptr> int_dist_; + std::shared_ptr random_engine_; + std::shared_ptr> real_dist_; + std::shared_ptr> int_dist_; }; } // namespace math diff --git a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc index f4037fb3107cc..20bbb72d7cd60 100644 --- a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc @@ -40,7 +40,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel { auto engine = framework::GetCPURandomEngine(seed); for (int64_t i = 0; i < size; ++i) { - data[i] = dist(gen_engine); + data[i] = dist(*gen_engine); } tensor->set_layout(DataLayout::kMKLDNN); diff --git a/paddle/fluid/operators/randint_op.cc b/paddle/fluid/operators/randint_op.cc index de9ab7f62938e..b3a2e14331955 100644 --- a/paddle/fluid/operators/randint_op.cc +++ b/paddle/fluid/operators/randint_op.cc @@ -50,7 +50,7 @@ class CPURandintKernel : public framework::OpKernel { auto engine = framework::GetCPURandomEngine(seed); for (int64_t i = 0; i < size; ++i) { - data[i] = dist(engine); + data[i] = dist(*engine); } } }; diff --git a/paddle/fluid/operators/randperm_op.h b/paddle/fluid/operators/randperm_op.h index 0f14bf509b2de..02aabb9a7b569 100644 --- a/paddle/fluid/operators/randperm_op.h +++ b/paddle/fluid/operators/randperm_op.h @@ -35,7 +35,7 @@ static inline void random_permate(T* data_ptr, int num, unsigned int seed) { data_ptr[i] = static_cast(i); } - std::shuffle(data_ptr, data_ptr + num, engine); + std::shuffle(data_ptr, data_ptr + num, *engine); } template diff --git a/paddle/fluid/operators/sampling_id_op.h 
b/paddle/fluid/operators/sampling_id_op.h index d8dcbfde1e407..9bec08f593afe 100644 --- a/paddle/fluid/operators/sampling_id_op.h +++ b/paddle/fluid/operators/sampling_id_op.h @@ -59,7 +59,7 @@ class SamplingIdKernel : public framework::OpKernel { auto engine = framework::GetCPURandomEngine(seed); std::vector ids(batch_size); for (int i = 0; i < batch_size; ++i) { - T r = dist(engine); + T r = dist(*engine); int idx = width - 1; for (int j = 0; j < width; ++j) { if ((r -= ins_vector[i * width + j]) < 0) { diff --git a/paddle/fluid/operators/truncated_gaussian_random_op.cc b/paddle/fluid/operators/truncated_gaussian_random_op.cc index 58e886b7d4e0e..419f0f7a2a578 100644 --- a/paddle/fluid/operators/truncated_gaussian_random_op.cc +++ b/paddle/fluid/operators/truncated_gaussian_random_op.cc @@ -171,7 +171,7 @@ class CPUTruncatedGaussianRandomKernel : public framework::OpKernel { unsigned int seed = static_cast(context.Attr("seed")); auto engine = framework::GetCPURandomEngine(seed); for (int64_t i = 0; i < size; ++i) { - data[i] = truncated_normal(dist(engine)); + data[i] = truncated_normal(dist(*engine)); } } }; diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index be0da433e6268..9cffe09a33abf 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -68,7 +68,7 @@ class CPUUniformRandomKernel : public framework::OpKernel { auto engine = framework::GetCPURandomEngine(seed); for (int64_t i = 0; i < size; ++i) { - data[i] = dist(engine); + data[i] = dist(*engine); } unsigned int diag_num = From 2891d9cb4c5e72204c267278a73849ba770b73fd Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 27 Aug 2020 02:37:42 +0800 Subject: [PATCH 21/28] fix double free --- paddle/fluid/framework/generator.cc | 50 +++++++++-------- paddle/fluid/framework/generator.h | 54 ++++++++++--------- paddle/fluid/pybind/generator_py.cc | 8 ++- .../test_reinforcement_learning.py | 1 + 
.../dygraph_to_static/test_transformer.py | 4 ++ .../unittests/test_dygraph_multi_forward.py | 2 + .../tests/unittests/test_fuse_bn_act_pass.py | 1 + .../fluid/tests/unittests/test_generator.py | 1 - .../tests/unittests/test_imperative_gan.py | 3 ++ .../tests/unittests/test_imperative_gnn.py | 3 ++ ..._imperative_lod_tensor_to_selected_rows.py | 2 + .../test_imperative_ocr_attention_model.py | 2 + .../unittests/test_imperative_optimizer.py | 3 ++ .../unittests/test_imperative_optimizer_v2.py | 3 ++ .../unittests/test_imperative_ptb_rnn.py | 2 + ...test_imperative_ptb_rnn_sorted_gradient.py | 3 ++ .../test_imperative_reinforcement.py | 2 + .../tests/unittests/test_imperative_resnet.py | 2 + .../test_imperative_resnet_sorted_gradient.py | 3 ++ .../unittests/test_imperative_se_resnext.py | 2 + ..._imperative_selected_rows_to_lod_tensor.py | 2 + ...perative_star_gan_with_gradient_penalty.py | 2 + ..._imperative_transformer_sorted_gradient.py | 3 +- .../fluid/tests/unittests/test_layers.py | 2 + .../fluid/tests/unittests/test_manual_seed.py | 37 +++++++------ .../fluid/tests/unittests/test_random_seed.py | 6 +-- .../fluid/tests/unittests/test_regularizer.py | 3 ++ python/paddle/framework/random.py | 20 +++++-- .../paddle/incubate/hapi/tests/test_text.py | 1 + 29 files changed, 150 insertions(+), 77 deletions(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index b83f15449aba2..f1bcf6a7eaa54 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -28,6 +28,8 @@ namespace framework { const std::shared_ptr& DefaultCPUGenerator() { static auto default_cpu_generator = std::make_shared(GetRandomSeed()); + VLOG(4) << "initial seed: " << default_cpu_generator->GetCurrentSeed() + << ", cpu engine: " << default_cpu_generator->GetCPUEngine().get(); return default_cpu_generator; } @@ -44,10 +46,14 @@ std::shared_ptr OpDefaultCPUEngine() { // (3) op seed is set, use OpDefaultCPUEngine() and set the seed 
std::shared_ptr GetCPURandomEngine(uint64_t seed) { if (DefaultCPUGenerator()->GetIsInitPy() && seed == 0) { + VLOG(4) << "Use random engine from generator"; return DefaultCPUGenerator()->GetCPUEngine(); } else { if (seed == 0) { seed = GetRandomSeed(); + VLOG(4) << "Use default random engine with random seed = " << seed; + } else { + VLOG(4) << "Use default random engine with fixed random seed = " << seed; } static std::mutex mu_; { @@ -58,53 +64,55 @@ std::shared_ptr GetCPURandomEngine(uint64_t seed) { } } -GeneratorState* Generator::GetState() { - std::lock_guard lock(this->mutex); - return this->state_.get(); +GeneratorState Generator::GetState() { + std::lock_guard lock(this->mu_); + state_.cpu_engine = *engine_; + return this->state_; } -void Generator::SetState(GeneratorState* state_in) { - std::lock_guard lock(this->mutex); - *this->state_ = *state_in; +void Generator::SetState(const GeneratorState& state) { + std::lock_guard lock(this->mu_); + this->state_ = state; + this->engine_ = std::make_shared(state.cpu_engine); } uint64_t Generator::GetCurrentSeed() { - std::lock_guard lock(this->mutex); - return this->state_->current_seed; + std::lock_guard lock(this->mu_); + return this->state_.current_seed; } uint64_t Generator::Seed() { - std::lock_guard lock(this->mutex); + std::lock_guard lock(this->mu_); uint64_t seed; std::random_device de; seed = ((((uint64_t)de()) << 32) + de()) & 0x1FFFFFFFFFFFFF; - this->state_->current_seed = seed; + this->state_.current_seed = seed; std::seed_seq seq({seed}); - this->state_->cpu_engine->seed(seq); + this->engine_->seed(seq); - return this->state_->current_seed; + return this->state_.current_seed; } void Generator::SetCurrentSeed(uint64_t seed) { - std::lock_guard lock(this->mutex); - this->state_->current_seed = uint64_t(seed); + std::lock_guard lock(this->mu_); + this->state_.current_seed = seed; std::seed_seq seq({seed}); - this->state_->cpu_engine->seed(seq); + this->engine_->seed(seq); } std::shared_ptr 
Generator::GetCPUEngine() { - std::lock_guard lock(this->mutex); - return this->state_->cpu_engine; + std::lock_guard lock(this->mu_); + return this->engine_; } void Generator::SetCPUEngine(std::shared_ptr engine) { - std::lock_guard lock(this->mutex); - this->state_->cpu_engine = engine; + std::lock_guard lock(this->mu_); + this->engine_ = engine; } uint64_t Generator::Random64() { - std::lock_guard lock(this->mutex); - auto engine = this->state_->cpu_engine; + std::lock_guard lock(this->mu_); + auto engine = this->engine_; return (*engine)(); } diff --git a/paddle/fluid/framework/generator.h b/paddle/fluid/framework/generator.h index 4d557bb67de02..82b35f7ad550e 100644 --- a/paddle/fluid/framework/generator.h +++ b/paddle/fluid/framework/generator.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include @@ -37,36 +38,38 @@ static uint64_t GetRandomSeed() { struct GeneratorState { int64_t device = -1; uint64_t current_seed = 34342423252; - std::shared_ptr cpu_engine; + std::mt19937_64 cpu_engine; }; struct Generator { Generator() { - GeneratorState default_gen_state_cpu; - default_gen_state_cpu.device = -1; - default_gen_state_cpu.current_seed = GetRandomSeed(); - std::seed_seq seq({default_gen_state_cpu.current_seed}); - default_gen_state_cpu.cpu_engine = std::make_shared(seq); - this->state_ = std::make_shared(default_gen_state_cpu); + auto seed = GetRandomSeed(); + std::seed_seq seq({seed}); + auto engine = std::make_shared(seq); + this->state_.cpu_engine = *engine; + this->state_.device = -1; + this->state_.current_seed = seed; + this->engine_ = engine; + VLOG(4) << "initial seed: " << this->state_.current_seed + << ", cpu engine: " << &this->state_.cpu_engine; } explicit Generator(uint64_t seed) { - GeneratorState default_gen_state_cpu; - default_gen_state_cpu.device = -1; - default_gen_state_cpu.current_seed = seed; std::seed_seq seq({seed}); - default_gen_state_cpu.cpu_engine = std::make_shared(seq); - this->state_ = 
std::make_shared(default_gen_state_cpu); + auto engine = std::make_shared(seq); + this->state_.cpu_engine = *engine; + this->state_.device = -1; + this->state_.current_seed = seed; + this->engine_ = engine; + VLOG(4) << "initial seed: " << this->state_.current_seed + << ", cpu engine: " << &this->state_.cpu_engine; this->is_init_py_ = true; // TODO(zhiqiu): remove it in future } - explicit Generator(GeneratorState state_in) - : state_{std::make_shared(state_in)} {} - Generator(const Generator& other) - : Generator(other, std::lock_guard(other.mutex)) {} + Generator(const Generator& other) = delete; // get random state - GeneratorState* GetState(); + GeneratorState GetState(); // set random state - void SetState(GeneratorState* state_in); + void SetState(const GeneratorState&); // get current seed uint64_t GetCurrentSeed(); // random a seed and get @@ -84,14 +87,13 @@ struct Generator { bool GetIsInitPy() const; private: - std::shared_ptr state_; - mutable std::mutex mutex; - - Generator(const Generator& other, const std::lock_guard&) - : state_(std::make_shared(*(other.state_))) {} - // NOTE(zhiqiu): is_init_py_ is used to make generator be compatible with old - // seed, and it should be removed after all random-related operators and - // unittests upgrades to use generator. + GeneratorState state_; + std::shared_ptr engine_; + mutable std::mutex mu_; + + // NOTE(zhiqiu): is_init_py_ is used to make generator be compatible with + // old seed, and it should be removed after all random-related operators + // and unittests upgrades to use generator. 
bool is_init_py_ = false; }; diff --git a/paddle/fluid/pybind/generator_py.cc b/paddle/fluid/pybind/generator_py.cc index 922bdddc1f8ff..90b7f50105253 100644 --- a/paddle/fluid/pybind/generator_py.cc +++ b/paddle/fluid/pybind/generator_py.cc @@ -44,8 +44,7 @@ void BindGenerator(py::module* m_ptr) { [](framework::Generator& self) { new (&self) framework::Generator(); }) - .def("get_state", &framework::Generator::GetState, - py::return_value_policy::move) + .def("get_state", &framework::Generator::GetState) .def("set_state", &framework::Generator::SetState) .def("manual_seed", [](std::shared_ptr& self, uint64_t seed) { @@ -55,9 +54,8 @@ void BindGenerator(py::module* m_ptr) { .def("seed", &framework::Generator::Seed) .def("initial_seed", &framework::Generator::GetCurrentSeed) .def("random", &framework::Generator::Random64) - .def("get_cpu_engine", &framework::Generator::GetCPUEngine, - py::return_value_policy::move) - .def("set_cpu_engine", &framework::Generator::SetCPUEngine) + // .def("get_cpu_engine", &framework::Generator::GetCPUEngine) + // .def("set_cpu_engine", &framework::Generator::SetCPUEngine) .def_property("_is_init_py", &framework::Generator::GetIsInitPy, &framework::Generator::SetIsInitPy); m.def("default_cpu_generator", &framework::DefaultCPUGenerator); diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py index 8d9ee61f5ec75..1d211197ebd48 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py @@ -66,6 +66,7 @@ def train(args, place, to_static): with fluid.dygraph.guard(place): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) local_random = np.random.RandomState(SEED) policy = Policy() diff --git 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py index 735e131c141a6..4fc8d27d30cb8 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py @@ -33,6 +33,7 @@ def train_static(args, batch_generator): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) train_prog = fluid.Program() startup_prog = fluid.Program() @@ -130,6 +131,7 @@ def train_dygraph(args, batch_generator): with fluid.dygraph.guard(place): if SEED is not None: paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) # define data loader train_loader = fluid.io.DataLoader.from_generator(capacity=10) train_loader.set_batch_generator(batch_generator, places=place) @@ -221,6 +223,7 @@ def train_dygraph(args, batch_generator): def predict_dygraph(args, batch_generator): with fluid.dygraph.guard(place): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) # define data loader test_loader = fluid.io.DataLoader.from_generator(capacity=10) @@ -292,6 +295,7 @@ def predict_static(args, batch_generator): test_prog = fluid.Program() with fluid.program_guard(test_prog): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) # define input and reader input_field_names = util.encoder_data_input_fields + util.fast_decoder_data_input_fields diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py index 3604e844df154..88b496c1d89e6 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py @@ -111,6 +111,7 @@ def test_mnist_forward_float32(self): with fluid.dygraph.guard(): paddle.manual_seed(SEED) + 
paddle.framework.random._manual_program_seed(SEED) mnist = MNIST() sgd = SGDOptimizer( learning_rate=1e-3, parameter_list=mnist.parameters()) @@ -143,6 +144,7 @@ def test_mnist_forward_float32(self): with new_program_scope(): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py index 85e5513f8fb2d..921dbdbc6d4e1 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py @@ -58,6 +58,7 @@ def build_program(self, main_program, startup_program, use_cuda, seed=1): def check(self, place, use_cuda): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main_program = fluid.Program() startup_program = fluid.Program() x, y, loss = self.build_program(main_program, startup_program, use_cuda) diff --git a/python/paddle/fluid/tests/unittests/test_generator.py b/python/paddle/fluid/tests/unittests/test_generator.py index 6cc43d3d54982..d40463ed788fb 100644 --- a/python/paddle/fluid/tests/unittests/test_generator.py +++ b/python/paddle/fluid/tests/unittests/test_generator.py @@ -34,7 +34,6 @@ def test_basic_generator(self): st = gen.get_state() gen.set_state(st) gen.random() - gen.set_cpu_engine(gen.get_cpu_engine()) def test_basic_generator_error(self): self.assertRaises(ValueError, generator.Generator, device="CUDA") diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py index 881e74a9fae44..b752b439f0fa9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py @@ -57,6 +57,7 @@ class TestDygraphGAN(unittest.TestCase): def test_gan_float32(self): seed = 90 
paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) startup = fluid.Program() discriminate_p = fluid.Program() generate_p = fluid.Program() @@ -131,6 +132,7 @@ def test_gan_float32(self): dy_params = dict() with fluid.dygraph.guard(): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) discriminator = Discriminator() generator = Generator() @@ -175,6 +177,7 @@ def test_gan_float32(self): with fluid.dygraph.guard(): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) discriminator2 = Discriminator() generator2 = Generator() sgd2 = SGDOptimizer( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py index 533db6b12c936..4db6f2d0da1d5 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py @@ -62,6 +62,7 @@ def forward(self, x, adj): class TestDygraphGNN(unittest.TestCase): def test_gnn_float32(self): paddle.manual_seed(90) + paddle.framework.random._manual_program_seed(90) startup = fluid.Program() main = fluid.Program() @@ -112,6 +113,7 @@ def test_gnn_float32(self): with fluid.dygraph.guard(): paddle.manual_seed(90) + paddle.framework.random._manual_program_seed(90) features = np.ones([1, 100, 50], dtype=np.float32) # Use selected rows when it's supported. @@ -137,6 +139,7 @@ def test_gnn_float32(self): with fluid.dygraph.guard(): paddle.manual_seed(90) + paddle.framework.random._manual_program_seed(90) features2 = np.ones([1, 100, 50], dtype=np.float32) # Use selected rows when it's supported. 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py index 2e962844c236a..f0fea2d7eb75c 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py @@ -96,6 +96,7 @@ def simple_net_float32(self, is_sparse, dtype): for is_sort_sum_gradient in [True, False]: with fluid.dygraph.guard(place): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, @@ -140,6 +141,7 @@ def simple_net_float32(self, is_sparse, dtype): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index a0e898b14f272..5400b785d2929 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -404,6 +404,7 @@ def test_while_op(self): with fluid.dygraph.guard(): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) ocr_attention = OCRAttention() if Config.learning_rate_decay == "piecewise_decay": @@ -454,6 +455,7 @@ def test_while_op(self): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) ocr_attention = OCRAttention() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index 
71e1e8e081cab..7876675bcc6a1 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -75,6 +75,7 @@ def _check_exception(self, exception_message, place=None): with fluid.dygraph.guard(place): try: paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( parameter_list=mlp.parameters()) @@ -91,6 +92,7 @@ def _check_mlp(self, place=None): with fluid.dygraph.guard(place): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( @@ -131,6 +133,7 @@ def _check_mlp(self, place=None): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) if place == None: place = fluid.CPUPlace() if not core.is_compiled_with_cuda( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py index 2481f024fdab8..ca8fb4c220f5e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py @@ -75,6 +75,7 @@ def _check_exception(self, exception_message, place=None): with fluid.dygraph.guard(place): try: paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( parameter_list=mlp.parameters()) @@ -91,6 +92,7 @@ def _check_mlp(self, place=None): with fluid.dygraph.guard(place): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( @@ -131,6 +133,7 @@ def _check_mlp(self, place=None): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) if place == None: place = fluid.CPUPlace() if not core.is_compiled_with_cuda( diff --git 
a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index bf5fc13617460..fa23ff8e7c29f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -227,6 +227,7 @@ def ptb_rnn_cpu_float32(self, is_sparse): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -294,6 +295,7 @@ def ptb_rnn_cpu_float32(self, is_sparse): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py index a02b7db61c2df..0487f8dd9a640 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py @@ -46,6 +46,7 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): with fluid.dygraph.guard(): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( @@ -95,6 +96,8 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) + ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py index 935c28dbebb7f..0076c61e58407 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py @@ -65,6 +65,7 @@ def test_mnist_float32(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) policy = Policy(input_size=4) @@ -105,6 +106,7 @@ def test_mnist_float32(self): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 7602994fcecee..e8a2298c17d00 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -252,6 +252,7 @@ def test_resnet_float32(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) resnet = ResNet() optimizer = optimizer_setting( @@ -334,6 +335,7 @@ def test_resnet_float32(self): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index f9ec003cc9c64..13b12da3318ca 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -79,6 +79,8 @@ def test_resnet_sort_gradient_float32(self): with fluid.dygraph.guard(): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) + resnet = ResNet() optimizer = optimizer_setting( train_parameters, parameter_list=resnet.parameters()) @@ 
-136,6 +138,7 @@ def test_resnet_sort_gradient_float32(self): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index e22a91f848331..a04e1e4e5aafe 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -309,6 +309,7 @@ def test_se_resnext_float32(self): epoch_num = 1 with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) se_resnext = SeResNeXt() optimizer = optimizer_setting( @@ -367,6 +368,7 @@ def test_se_resnext_float32(self): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py index 5b612d2267635..794f59e48507e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py @@ -103,6 +103,7 @@ def simple_net_float(self, is_sparse, dtype): traced_layer = None with fluid.dygraph.guard(place): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, @@ -146,6 +147,7 @@ def simple_net_float(self, is_sparse, dtype): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, diff --git 
a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py index f3dd72bfca906..e94157fa047ee 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py @@ -469,6 +469,7 @@ def build_optimizer(layer, cfg, loss=None): class DyGraphTrainModel(object): def __init__(self, cfg): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) self.generator = Generator(cfg) self.discriminator = Discriminator(cfg) @@ -529,6 +530,7 @@ def create_data_layer(): return image_real, label_org, label_trg paddle.manual_seed(cfg.seed) + paddle.framework.random._manual_program_seed(cfg.seed) self.gen_program = fluid.Program() gen_startup_program = fluid.Program() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py index 35418e0ac8db0..9f58ef881e4e4 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py @@ -952,7 +952,7 @@ def transformer_sort_gradient_float32(self, is_sparse): with guard(): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.manual_seed(seed) - + paddle.framework.random._manual_program_seed(seed) transformer = TransFormer( ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size, @@ -1036,6 +1036,7 @@ def transformer_sort_gradient_float32(self, is_sparse): with new_program_scope(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) transformer = TransFormer( ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_layers.py 
b/python/paddle/fluid/tests/unittests/test_layers.py index d862d40dc1a51..76a2be02f1f77 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -58,6 +58,7 @@ def _get_place(self, force_to_use_cpu=False): def static_graph(self): with new_program_scope(): paddle.manual_seed(self.seed) + paddle.framework.random._manual_program_seed(self.seed) yield def get_static_graph_result(self, @@ -77,6 +78,7 @@ def dynamic_graph(self, force_to_use_cpu=False): with fluid.dygraph.guard( self._get_place(force_to_use_cpu=force_to_use_cpu)): paddle.manual_seed(self.seed) + paddle.framework.random._manual_program_seed(self.seed) yield diff --git a/python/paddle/fluid/tests/unittests/test_manual_seed.py b/python/paddle/fluid/tests/unittests/test_manual_seed.py index 747026622e465..a1d6eb915ce78 100644 --- a/python/paddle/fluid/tests/unittests/test_manual_seed.py +++ b/python/paddle/fluid/tests/unittests/test_manual_seed.py @@ -15,30 +15,33 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid from paddle.framework import manual_seed from paddle.fluid.framework import Program, default_main_program, default_startup_program +import numpy as np class TestManualSeed(unittest.TestCase): def test_manual_seed(self): - local_program = Program() - local_main_prog = default_main_program() - local_start_prog = default_startup_program() - - self.assertEqual(0, local_program.random_seed) - self.assertEqual(0, local_main_prog.random_seed) - self.assertEqual(0, local_start_prog.random_seed) - - manual_seed(102) - global_program1 = Program() - global_program2 = Program() - global_main_prog = default_main_program() - global_start_prog = default_startup_program() - self.assertEqual(102, global_program1.random_seed) - self.assertEqual(102, global_program2.random_seed) - self.assertEqual(102, global_main_prog.random_seed) - self.assertEqual(102, global_start_prog.random_seed) + 
fluid.enable_dygraph() + + gen = paddle.manual_seed(12312321111) + x = fluid.layers.gaussian_random([10], dtype="float32") + st1 = gen.get_state() + x1 = fluid.layers.gaussian_random([10], dtype="float32") + gen.set_state(st1) + x2 = fluid.layers.gaussian_random([10], dtype="float32") + gen.manual_seed(12312321111) + x3 = fluid.layers.gaussian_random([10], dtype="float32") + x_np = x.numpy() + x1_np = x1.numpy() + x2_np = x2.numpy() + x3_np = x3.numpy() + + if not fluid.core.is_compiled_with_cuda(): + self.assertTrue(np.allclose(x1_np, x2_np)) + self.assertTrue(np.allclose(x_np, x3_np)) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index c024bce049f64..343508bf619b6 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -26,9 +26,9 @@ class TestGeneratorSeed(unittest.TestCase): - """ - Test cases for cpu generator seed. - """ + # """ + # Test cases for cpu generator seed. 
+ # """ def test_generator_uniform_random_dygraph(self): """Test Generator seed.""" diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 6d288ea6e08b9..44087c5421a5e 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -170,6 +170,7 @@ def run_program(self, place, feed_list): def check_l2decay_regularizer(self, place, model): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() with self.scope_prog_guard( @@ -189,6 +190,7 @@ def check_l2decay_regularizer(self, place, model): def check_l2decay(self, place, model): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() @@ -245,6 +247,7 @@ def test_repeated_regularization(self): input = fluid.dygraph.to_variable( np.random.randn(3, 5).astype('float32')) paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) linear1 = fluid.dygraph.Linear( 5, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr) diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 886daecfc99b9..a3341ae7b4f66 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -42,10 +42,24 @@ def manual_seed(seed): seed = int(seed) + core.default_cpu_generator()._is_init_py = True + return core.default_cpu_generator().manual_seed(seed) + + +def _manual_program_seed(seed): + """ + Sets global seed for generating random numbers. + + NOTE(zhiqiu): This is the original implemention of manual_seed. Keeps it temporally + since CUDA generator is not developed, so we need it in the unittest. + + Args: + seed(int): The random seed to set. It is recommend to set a large int number. 
+ + Returns: + None + """ fluid.default_main_program().random_seed = seed fluid.default_startup_program().random_seed = seed program = fluid.Program() program.global_seed(seed) - - core.default_cpu_generator()._is_init_py = True - return core.default_cpu_generator().manual_seed(seed) diff --git a/python/paddle/incubate/hapi/tests/test_text.py b/python/paddle/incubate/hapi/tests/test_text.py index b2f8e7d314631..3c5323756b2bd 100644 --- a/python/paddle/incubate/hapi/tests/test_text.py +++ b/python/paddle/incubate/hapi/tests/test_text.py @@ -89,6 +89,7 @@ def _calc_output(self, place, mode="test", dygraph=True): else: fluid.disable_dygraph() paddle.manual_seed(self._random_seed) + paddle.framework.random._manual_program_seed(self._random_seed) layer = self.model_cls(**self.attrs) if isinstance( self.attrs, dict) else self.model_cls(*self.attrs) From abbe69c72966bcf0b81d31b25a9e193ef276aa4d Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 27 Aug 2020 09:19:06 +0800 Subject: [PATCH 22/28] fix bug --- paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc index 20bbb72d7cd60..98200caca8cf6 100644 --- a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc @@ -40,7 +40,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel { auto engine = framework::GetCPURandomEngine(seed); for (int64_t i = 0; i < size; ++i) { - data[i] = dist(*gen_engine); + data[i] = dist(*engine); } tensor->set_layout(DataLayout::kMKLDNN); From fb1cc0549d117dbb8446006e9c4878ee85d21c9f Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 27 Aug 2020 12:09:36 +0800 Subject: [PATCH 23/28] fix bug --- .../fluid/tests/unittests/dygraph_to_static/test_bmn.py | 3 ++- python/paddle/fluid/tests/unittests/test_transformer_api.py | 4 +++- 2 files 
changed, 5 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py index 80790d9a36852..af7e73c41464d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py @@ -15,6 +15,7 @@ import math import numpy as np import unittest +import paddle from paddle.jit import to_static import paddle.fluid as fluid from paddle.fluid import ParamAttr @@ -561,7 +562,7 @@ def train_bmn(args, place, to_static): with fluid.dygraph.guard(place): paddle.manual_seed(SEED) - paddle.framework.random._manual_program_seed(seed) + paddle.framework.random._manual_program_seed(SEED) global local_random local_random = np.random.RandomState(SEED) diff --git a/python/paddle/fluid/tests/unittests/test_transformer_api.py b/python/paddle/fluid/tests/unittests/test_transformer_api.py index c8d1e77134036..5fea9f69a18c8 100644 --- a/python/paddle/fluid/tests/unittests/test_transformer_api.py +++ b/python/paddle/fluid/tests/unittests/test_transformer_api.py @@ -211,7 +211,8 @@ def ffn(src, encoder_layer, ffn_fc1_act="relu"): class TestTransformer(unittest.TestCase): def test_multi_head_attention(self): def multihead_attention_test_helper(self_attention, cache): - paddle.framework.manual_seed(2020) + paddle.manual_seed(2020) + paddle.framework.random._manual_program_seed(2020) # self_attention|cross_attention, cache|No cache with fluid.dygraph.guard(fluid.CPUPlace()): @@ -275,6 +276,7 @@ def test_transformer_encoder_layer(self): with fluid.dygraph.guard(fluid.CPUPlace()): paddle.framework.manual_seed(2020) + paddle.framework.random._manual_program_seed(2020) ffn_fc1_act = "relu" # 1.generate basic params From 7d914f90126037aa24302e1fa784133ef542a3af Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 27 Aug 2020 12:58:50 +0800 Subject: [PATCH 24/28] fix bug --- paddle/fluid/framework/generator.cc | 
5 +++-- paddle/fluid/operators/dropout_op.h | 6 ++++-- .../paddle/fluid/contrib/tests/test_weight_decay_extend.py | 2 ++ .../fluid/tests/unittests/dygraph_to_static/test_lac.py | 1 + .../tests/unittests/dygraph_to_static/test_mobile_net.py | 1 + .../fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py | 1 + .../tests/unittests/dygraph_to_static/test_se_resnet.py | 1 + .../fluid/tests/unittests/dygraph_to_static/test_tsm.py | 1 + .../unittests/test_buffer_shared_memory_reuse_pass.py | 1 + .../paddle/fluid/tests/unittests/test_compiled_program.py | 3 +++ .../fluid/tests/unittests/test_decoupled_py_reader.py | 1 + .../tests/unittests/test_embedding_id_stop_gradient.py | 1 + .../fluid/tests/unittests/test_generator_dataloader.py | 1 + .../unittests/test_imperative_auto_mixed_precision.py | 2 ++ .../fluid/tests/unittests/test_imperative_double_grad.py | 2 ++ .../fluid/tests/unittests/test_imperative_save_load.py | 7 +++++++ .../fluid/tests/unittests/test_imperative_save_load_v2.py | 7 +++++++ .../tests/unittests/test_ir_memory_optimize_ifelse_op.py | 1 + python/paddle/fluid/tests/unittests/test_py_func_op.py | 1 + python/paddle/incubate/hapi/tests/test_model.py | 3 +++ 20 files changed, 44 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index f1bcf6a7eaa54..2b1f41c6583d2 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -49,6 +49,7 @@ std::shared_ptr GetCPURandomEngine(uint64_t seed) { VLOG(4) << "Use random engine from generator"; return DefaultCPUGenerator()->GetCPUEngine(); } else { + auto engine = std::make_shared(); if (seed == 0) { seed = GetRandomSeed(); VLOG(4) << "Use default random engine with random seed = " << seed; @@ -58,9 +59,9 @@ std::shared_ptr GetCPURandomEngine(uint64_t seed) { static std::mutex mu_; { std::lock_guard lock(mu_); - OpDefaultCPUEngine()->seed(seed); + engine->seed(seed); } - return OpDefaultCPUEngine(); + return engine; } } 
diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index 06d4a253a8a28..03602429f21e4 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -55,14 +55,16 @@ class CPUDropoutKernel : public framework::OpKernel { std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT return; } - + std::random_device rnd; + // std::minstd_rand engine; // NOTE: fixed seed should only be used in unittest or for debug. // Guarantee to use random seed in training. int seed_data; if (seed) { seed_data = *(seed->data()); } else { - seed_data = context.Attr("seed"); + seed_data = + context.Attr("fix_seed") ? context.Attr("seed") : rnd(); } auto engine = framework::GetCPURandomEngine(seed_data); diff --git a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py index db32602eea5b5..a5f08ca969ac4 100644 --- a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py +++ b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py @@ -93,6 +93,7 @@ def run_program(self, place, feed_list): def check_weight_decay(self, place, model): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() @@ -115,6 +116,7 @@ def check_weight_decay(self, place, model): def check_weight_decay2(self, place, model): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py index 3e85a278c679e..4d735b565ddbc 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py @@ -450,6 +450,7 @@ def do_train(args, to_static): ) else 
fluid.CPUPlace() with fluid.dygraph.guard(place): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) reader = get_random_input_data(args.batch_size, args.vocab_size, args.num_labels) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py index 1d2eb6eb137bf..a377075062b26 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py @@ -449,6 +449,7 @@ def train_mobilenet(args, to_static): np.random.seed(SEED) paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) if args.model == "MobileNetV1": net = MobileNetV1(class_dim=args.class_dim, scale=1.0) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py index 6786a02b9fac8..df2b69297bb4d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py @@ -219,6 +219,7 @@ def train(place): with fluid.dygraph.guard(place): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py index 9746eca9c9709..38e4d5ad5480b 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py @@ -332,6 +332,7 @@ def train(train_reader, to_static): with fluid.dygraph.guard(place): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) se_resnext = SeResNeXt() optimizer = optimizer_setting(train_parameters, 
se_resnext.parameters()) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py index 45b8083009cde..bedca412157f0 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py @@ -273,6 +273,7 @@ def train(args, fake_data_reader, to_static): np.random.seed(0) with fluid.dygraph.guard(place): paddle.manual_seed(1000) + paddle.framework.random._manual_program_seed(1000) video_model = TSM_ResNet("TSM", train_config, 'Train') diff --git a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py index 0cbd2c8f8f577..43d485a0a6d24 100644 --- a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py +++ b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py @@ -46,6 +46,7 @@ def setUp(self): def build_program_and_scope(self): self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) startup_program = fluid.Program() main_program = fluid.Program() diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index 39eec7fc3364b..c6cccf11633ba 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -31,6 +31,7 @@ def setUp(self): low=0, high=10, size=[16, 1], dtype=np.int64) with new_program_scope(): paddle.manual_seed(self.seed) + paddle.framework.random._manual_program_seed(seed) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) @@ -47,6 +48,7 @@ def setUp(self): def test_compiled_program_base(self): with new_program_scope(): 
paddle.manual_seed(self.seed) + paddle.framework.random._manual_program_seed(self.seed) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) @@ -64,6 +66,7 @@ def test_compiled_program_base(self): def test_compiled_program_with_data_parallel(self): with new_program_scope(): paddle.manual_seed(self.seed) + paddle.framework.random._manual_program_seed(self.seed) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py index 9fd8fa3b8bd9e..cc0f3745bbf7b 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py @@ -35,6 +35,7 @@ def random_reader(): def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) startup_prog = fluid.Program() main_prog = fluid.Program() diff --git a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py index e33cd81bbe1a2..c18b7c5b044e7 100644 --- a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py @@ -40,6 +40,7 @@ def test_check_grad(self): def run_program(self, place, stop_gradient=False): np.random.seed(1) paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) startup_program = fluid.Program() main_program = fluid.Program() diff --git a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py index e1e0edc52629a..7c1ff41f7e767 100644 --- a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py +++ 
b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py @@ -36,6 +36,7 @@ def random_reader(): def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) startup_prog = fluid.Program() main_prog = fluid.Program() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py index a4f3858d6fb24..fdf7adbfb45f0 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py @@ -121,6 +121,7 @@ def test_minimize(self): def run_simple_conv(inp_np, use_scaler=True): paddle.manual_seed(10) + paddle.framework.random._manual_program_seed(10) with fluid.dygraph.guard(): model = SimpleConv( num_channels=3, @@ -204,6 +205,7 @@ def train_resnet(self, enable_amp=True): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) resnet = ResNet(use_cudnn=True) optimizer = optimizer_setting( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py index 227cd5d4acb29..720c9f95c251e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py @@ -312,6 +312,7 @@ def model_f(input): with fluid.dygraph.guard(): paddle.manual_seed(123) + paddle.framework.random._manual_program_seed(123) a = fluid.dygraph.to_variable(value) a.stop_gradient = False @@ -328,6 +329,7 @@ def model_f(input): with fluid.dygraph.guard(): paddle.manual_seed(123) + paddle.framework.random._manual_program_seed(123) a = fluid.dygraph.to_variable(value) a.stop_gradient = False diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py 
b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py index fba793a83bd8f..e20f5d6a72278 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py @@ -220,6 +220,7 @@ def setUp(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -305,6 +306,7 @@ def testLoadAndSetVarBase(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -413,6 +415,7 @@ def testSetVariable(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -519,6 +522,7 @@ def testSetNumpy(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -708,6 +712,7 @@ def testLoadAndSetVarBaseBeforeTrain(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -798,7 +803,9 @@ def testSetNumpyBeforeTrain(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to + ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py index d7e04ea8c97c4..f26d6c37e1471 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py @@ -220,6 +220,7 @@ def setUp(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(123) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -305,6 +306,7 @@ def testLoadAndSetVarBase(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -413,6 +415,7 @@ def testSetVariable(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -519,6 +522,7 @@ def testSetNumpy(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -631,6 +635,7 @@ def testSetVariableBeforeTrain(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -709,6 +714,7 @@ def testLoadAndSetVarBaseBeforeTrain(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -799,6 +805,7 @@ def testSetNumpyBeforeTrain(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py index 4f4622bf16ad4..eaa7e711a29c7 100644 --- 
a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py @@ -38,6 +38,7 @@ def check_network_convergence(self, use_mem_opt=False, iter_num=5): paddle.manual_seed(100) + paddle.framework.random._manual_program_seed(100) prog = Program() startup_prog = Program() with program_guard(prog, startup_prog): diff --git a/python/paddle/fluid/tests/unittests/test_py_func_op.py b/python/paddle/fluid/tests/unittests/test_py_func_op.py index b63e240c805f3..3e43e15bef168 100644 --- a/python/paddle/fluid/tests/unittests/test_py_func_op.py +++ b/python/paddle/fluid/tests/unittests/test_py_func_op.py @@ -148,6 +148,7 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.core.Scope()): paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) np.random.seed(1) img = fluid.layers.data(name='image', shape=[784], dtype='float32') diff --git a/python/paddle/incubate/hapi/tests/test_model.py b/python/paddle/incubate/hapi/tests/test_model.py index 756a7ac226423..7fc471aa1e2ee 100644 --- a/python/paddle/incubate/hapi/tests/test_model.py +++ b/python/paddle/incubate/hapi/tests/test_model.py @@ -172,6 +172,7 @@ def setUpClass(cls): seed = 333 paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) dy_lenet = LeNetDygraph() cls.init_param = dy_lenet.state_dict() @@ -223,6 +224,7 @@ def fit(self, dynamic, num_replicas=None, rank=None): fluid.enable_dygraph(self.device) if dynamic else None seed = 333 paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) net = LeNet(classifier_activation=None) optim_new = fluid.optimizer.Adam( @@ -327,6 +329,7 @@ def forward(self, x): class TestModelFunction(unittest.TestCase): def set_seed(self, seed=1024): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) def 
test_train_batch(self, dynamic=True): dim = 20 From 095612853013ae816e568e52fef27e839e935cb2 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 27 Aug 2020 16:26:28 +0800 Subject: [PATCH 25/28] fix test bug --- paddle/fluid/operators/dropout_op.h | 5 ++-- .../dygraph_to_static/test_sentiment.py | 1 + .../unittests/parallel_executor_test_base.py | 1 + .../tests/unittests/test_compiled_program.py | 2 +- .../unittests/test_imperative_save_load_v2.py | 2 +- .../paddle/incubate/hapi/tests/test_text.py | 30 +++++++++---------- 6 files changed, 21 insertions(+), 20 deletions(-) diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index 03602429f21e4..9d9eb4a82a075 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -55,16 +55,15 @@ class CPUDropoutKernel : public framework::OpKernel { std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT return; } - std::random_device rnd; // std::minstd_rand engine; // NOTE: fixed seed should only be used in unittest or for debug. // Guarantee to use random seed in training. - int seed_data; + int seed_data = 0; if (seed) { seed_data = *(seed->data()); } else { seed_data = - context.Attr("fix_seed") ? context.Attr("seed") : rnd(); + context.Attr("fix_seed") ? 
context.Attr("seed") : 0; } auto engine = framework::GetCPURandomEngine(seed_data); diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py index e46fc726287d5..2aa3396fb7f85 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py @@ -287,6 +287,7 @@ def train(args, to_static): with fluid.dygraph.guard(place): np.random.seed(SEED) paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) train_reader = fake_data_reader(args.class_num, args.vocab_size, args.batch_size, args.padding_size) diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index 3c760e154ba20..9c3ed13cbb000 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -66,6 +66,7 @@ def run_executor(exe, binary, feed, fetch_list): ), "feed_data_reader must be type of FeedDataReader" paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main = fluid.Program() startup = fluid.Program() diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index c6cccf11633ba..751fed2e56126 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -31,7 +31,7 @@ def setUp(self): low=0, high=10, size=[16, 1], dtype=np.int64) with new_program_scope(): paddle.manual_seed(self.seed) - paddle.framework.random._manual_program_seed(seed) + paddle.framework.random._manual_program_seed(self.seed) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) diff --git 
a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py index f26d6c37e1471..5e3e8f224e01f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py @@ -220,7 +220,7 @@ def setUp(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) - paddle.framework.random._manual_program_seed(123) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, diff --git a/python/paddle/incubate/hapi/tests/test_text.py b/python/paddle/incubate/hapi/tests/test_text.py index 3c5323756b2bd..06fb639525a12 100644 --- a/python/paddle/incubate/hapi/tests/test_text.py +++ b/python/paddle/incubate/hapi/tests/test_text.py @@ -88,16 +88,18 @@ def _calc_output(self, place, mode="test", dygraph=True): fluid.enable_dygraph(place) else: fluid.disable_dygraph() - paddle.manual_seed(self._random_seed) + gen = paddle.manual_seed(self._random_seed) + gen._is_init_py = False paddle.framework.random._manual_program_seed(self._random_seed) - - layer = self.model_cls(**self.attrs) if isinstance( - self.attrs, dict) else self.model_cls(*self.attrs) - model = Model(layer, inputs=self.make_inputs()) - model.prepare() - if self.param_states: - model.load(self.param_states, optim_state=None) - return model.test_batch(self.inputs) + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + layer = self.model_cls(**self.attrs) if isinstance( + self.attrs, dict) else self.model_cls(*self.attrs) + model = Model(layer, inputs=self.make_inputs()) + model.prepare() + if self.param_states: + model.load(self.param_states, optim_state=None) + return model.test_batch(self.inputs) def check_output_with_place(self, place, mode="test"): dygraph_output = self._calc_output(place, mode, dygraph=True) @@ -113,6 +115,7 @@ def check_output_with_place(self, place, 
mode="test"): def check_output(self): devices = ["CPU", "GPU"] if fluid.is_compiled_with_cuda() else ["CPU"] + devices = ['PU'] for device in devices: place = set_device(device) self.check_output_with_place(place) @@ -131,12 +134,9 @@ def setUp(self): @staticmethod def model_init(model, input_size, hidden_size): - model.lstm = RNN( - BasicLSTMCell( - input_size, - hidden_size, - param_attr=fluid.ParamAttr(name="lstm_weight"), - bias_attr=fluid.ParamAttr(name="lstm_bias"))) + model.lstm = RNN(BasicLSTMCell( + input_size, + hidden_size, )) @staticmethod def model_forward(model, inputs): From c1fa2fb53746c29c4b661c9d8d44305977d15aa1 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 27 Aug 2020 17:53:27 +0800 Subject: [PATCH 26/28] fix test bug --- python/paddle/fluid/generator.py | 26 +++++++++++++------ .../dygraph_to_static/test_resnet.py | 1 + .../fluid/tests/unittests/test_layers.py | 10 +++---- .../fluid/tests/unittests/test_py_func_op.py | 12 ++++----- python/paddle/framework/random.py | 6 ++--- .../paddle/incubate/hapi/tests/test_text.py | 1 - 6 files changed, 32 insertions(+), 24 deletions(-) diff --git a/python/paddle/fluid/generator.py b/python/paddle/fluid/generator.py index 8bc6863831112..98924f801413b 100644 --- a/python/paddle/fluid/generator.py +++ b/python/paddle/fluid/generator.py @@ -17,18 +17,28 @@ __all__ = ['Generator'] -default_rng_seed_val = 34342423252 - class Generator(core.Generator): """Generator class""" - def __init__(self, device="CPU"): - """init""" - self.device = device - if self.device == "CPU": + def __init__(self, place=None): + """ + Create a generator object which manages the random number generation. ( Experimental Feature ) + + Parameters: + place(CPUPlace|CUDAPinnedPlace|CUDAPlace, optional): The place to allocate Tensor. Can be + CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. + + Returns: + Generator: A generator object. 
+ + """ + self.place = place + if not place: + place = core.CPUPlace() + if isinstance(place, core.CPUPlace): super(Generator, self).__init__() else: raise ValueError( - "generator class with device %s does not exist, currently only support generator with device 'CPU' " - % device) + "Generator class with %s does is not supported yet, currently only support generator with CPUPlace " + % place) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py index 674d7dd99ced4..6556b2f03bd53 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py @@ -216,6 +216,7 @@ def train(to_static): with fluid.dygraph.guard(place): np.random.seed(SEED) paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) train_reader = paddle.batch( reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 76a2be02f1f77..eb79a80da99fa 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -1034,7 +1034,7 @@ def test_nce(self): static_rlt2 = self.get_static_graph_result( feed=feed_dict, fetch_list=[nce_loss2])[0] - with self.dynamic_graph(force_to_use_cpu=True): + with self.dynamic_graph(): words = [] for i in range(window_size): words.append(base.to_variable(inp_word[i])) @@ -1070,7 +1070,7 @@ def test_nce(self): self.assertTrue(np.allclose(static_rlt2, static_rlt)) self.assertTrue(np.allclose(dy_rlt_value, static_rlt)) - with self.dynamic_graph(force_to_use_cpu=True): + with self.dynamic_graph(): custom_weight = np.random.randn(dict_size, 128).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( @@ -1996,13 +1996,13 @@ def 
test_accuracy(self): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - x = np.random.rand(3, 32, 32).astype("float32") - y = np.array([[1], [0], [1]]) + # x = np.random.rand(3, 32, 32).astype("float32") + # y = np.array([[1], [0], [1]]) static_out = exe.run(feed={"input": x, "label": y}, fetch_list=result[0]) - with self.dynamic_graph(): + with self.dynamic_graph(force_to_use_cpu=True): data = base.to_variable(x) label = base.to_variable(y) fc_out = fluid.layers.fc(data, size=10) diff --git a/python/paddle/fluid/tests/unittests/test_py_func_op.py b/python/paddle/fluid/tests/unittests/test_py_func_op.py index 3e43e15bef168..32d8f73552f71 100644 --- a/python/paddle/fluid/tests/unittests/test_py_func_op.py +++ b/python/paddle/fluid/tests/unittests/test_py_func_op.py @@ -147,10 +147,8 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.core.Scope()): - paddle.manual_seed(1) - paddle.framework.random._manual_program_seed(1) + gen = paddle.manual_seed(1) np.random.seed(1) - img = fluid.layers.data(name='image', shape=[784], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') loss = simple_fc_net(img, label, use_py_func_op) @@ -189,17 +187,17 @@ def setUp(self): self.use_parallel_executor = False def test_loss_diff(self): - losses = [] for use_cuda in [True, False]: + losses = [] for use_py_func_op in [True, False]: L = test_main(use_cuda, use_py_func_op, self.use_parallel_executor) if L is not None: losses.append(L) - for idx in six.moves.range(len(losses) - 1): - max_diff = np.max(np.abs(losses[idx] - losses[0])) - self.assertAlmostEqual(max_diff, 0, delta=1e-3) + for idx in six.moves.range(len(losses) - 1): + max_diff = np.max(np.abs(losses[idx] - losses[0])) + self.assertAlmostEqual(max_diff, 0, delta=1e-3) class TestPyFuncOpUseParallelExecutor(TestPyFuncOpUseExecutor): diff --git 
a/python/paddle/framework/random.py b/python/paddle/framework/random.py index a3341ae7b4f66..2555d24464112 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -22,19 +22,19 @@ def manual_seed(seed): """ - Sets global seed for generating random numbers. + Sets the seed for global default generator, which manages the random number generation. Args: seed(int): The random seed to set. It is recommend to set a large int number. Returns: - Generator: a generator object. + Generator: The global default generator object. Examples: .. code-block:: python import paddle - paddle.manual_seed(102) + gen = paddle.manual_seed(102) """ #TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade diff --git a/python/paddle/incubate/hapi/tests/test_text.py b/python/paddle/incubate/hapi/tests/test_text.py index 06fb639525a12..c4fef0d749ce7 100644 --- a/python/paddle/incubate/hapi/tests/test_text.py +++ b/python/paddle/incubate/hapi/tests/test_text.py @@ -115,7 +115,6 @@ def check_output_with_place(self, place, mode="test"): def check_output(self): devices = ["CPU", "GPU"] if fluid.is_compiled_with_cuda() else ["CPU"] - devices = ['PU'] for device in devices: place = set_device(device) self.check_output_with_place(place) From 0a356fb6647ec5655d08d41a3861a6b2ee0f8f97 Mon Sep 17 00:00:00 2001 From: zhiqiu Date: Thu, 27 Aug 2020 21:02:35 +0800 Subject: [PATCH 27/28] fix test bug --- paddle/fluid/framework/generator.cc | 6 ++++++ python/paddle/fluid/tests/unittests/test_generator.py | 5 ++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc index 2b1f41c6583d2..9bde9e20b19a0 100644 --- a/paddle/fluid/framework/generator.cc +++ b/paddle/fluid/framework/generator.cc @@ -49,6 +49,12 @@ std::shared_ptr GetCPURandomEngine(uint64_t seed) { VLOG(4) << "Use random engine from generator"; return DefaultCPUGenerator()->GetCPUEngine(); } else { + // 
NOTE(zhiqiu): creating an engine instance every time instead of using + // OpDefaultCPUEngine(), this is the legacy behavior of random operators. + // The benefit is that when running PE with fixed-seed in multiple threads, + // each thread has their own engine, and doesn't affect each other. + // + // And we need to measure the determinacy of Generator in PE. auto engine = std::make_shared(); if (seed == 0) { seed = GetRandomSeed(); diff --git a/python/paddle/fluid/tests/unittests/test_generator.py b/python/paddle/fluid/tests/unittests/test_generator.py index d40463ed788fb..8b1f420358d31 100644 --- a/python/paddle/fluid/tests/unittests/test_generator.py +++ b/python/paddle/fluid/tests/unittests/test_generator.py @@ -16,6 +16,7 @@ from __future__ import print_function import os import unittest +import paddle import paddle.fluid.generator as generator import time # temp for debug @@ -36,7 +37,9 @@ def test_basic_generator(self): gen.random() def test_basic_generator_error(self): - self.assertRaises(ValueError, generator.Generator, device="CUDA") + if paddle.fluid.core.is_compiled_with_cuda(): + self.assertRaises( + ValueError, generator.Generator, place=paddle.CUDAPlace(0)) if __name__ == "__main__":
train(conf_dict, to_static): with fluid.dygraph.guard(place): paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) conf_dict['dict_size'] = len(vocab) conf_dict['seq_len'] = args.seq_len diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py index 2f3f98e5dbcf9..cc6c2f97a9334 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py @@ -207,6 +207,7 @@ def test_deefcf(self): (users_np, items_np, labels_np, num_users, num_items, matrix) = get_data() paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) startup = fluid.Program() main = fluid.Program() @@ -243,6 +244,7 @@ def test_deefcf(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) deepcf = DeepCF(num_users, num_items, matrix) adam = fluid.optimizer.AdamOptimizer( @@ -267,6 +269,7 @@ def test_deefcf(self): with fluid.dygraph.guard(): paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) deepcf2 = DeepCF(num_users, num_items, matrix) adam2 = fluid.optimizer.AdamOptimizer( diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py index 6421631f9e88e..87b6e76a6d0ab 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py @@ -18,7 +18,7 @@ import pickle import unittest import numpy as np - +import paddle from paddle.static import InputSpec import paddle.fluid as fluid from paddle.fluid.dygraph import Linear @@ -109,6 +109,7 @@ def setUp(self): fluid.enable_dygraph() # config seed paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) def train_and_save_model(self, model_path=None, configs=None): layer = LinearNet(784, 1) @@ -294,6 +295,7 @@ def 
setUp(self): fluid.enable_dygraph() # config seed paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) def basic_save_load(self, layer, model_path, configs): # 1. train & save @@ -386,6 +388,7 @@ def setUp(self): fluid.enable_dygraph() # config seed paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) # train and save base model self.train_and_save_orig_model() @@ -427,6 +430,7 @@ def setUp(self): fluid.enable_dygraph() # config seed paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) def train_and_save(self): train_layer = LinearNetReturnHidden(8, 8)