diff --git a/paddle/fluid/framework/generator.cc b/paddle/fluid/framework/generator.cc
index d00e38784c2c0..9bde9e20b19a0 100644
--- a/paddle/fluid/framework/generator.cc
+++ b/paddle/fluid/framework/generator.cc
@@ -12,67 +12,122 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

+#include "paddle/fluid/framework/generator.h"
+
+#include <glog/logging.h>
+
 #include <deque>
 #include <memory>
 #include <mutex>
 #include <typeinfo>
 #include <utility>

-#include "paddle/fluid/framework/generator.h"
-
 namespace paddle {
 namespace framework {

-std::shared_ptr<Generator> Generator::gen_instance_ = NULL;
+const std::shared_ptr<Generator>& DefaultCPUGenerator() {
+  static auto default_cpu_generator =
+      std::make_shared<Generator>(GetRandomSeed());
+  VLOG(4) << "initial seed: " << default_cpu_generator->GetCurrentSeed()
+          << ", cpu engine: " << default_cpu_generator->GetCPUEngine().get();
+  return default_cpu_generator;
+}
+
+std::shared_ptr<std::mt19937_64> OpDefaultCPUEngine() {
+  static auto op_default_cpu_engine = std::make_shared<std::mt19937_64>();
+  return op_default_cpu_engine;
+}
+
+// NOTE(zhiqiu): there are 3 conditions:
+// (1) op seed is not set and DefaultCPUGenerator is inited, use
+// DefaultCPUGenerator
+// (2) op seed is not set and DefaultCPUGenerator is not inited, use
+// OpDefaultCPUEngine() and set a random seed
+// (3) op seed is set, use OpDefaultCPUEngine() and set the seed
+std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t seed) {
+  if (DefaultCPUGenerator()->GetIsInitPy() && seed == 0) {
+    VLOG(4) << "Use random engine from generator";
+    return DefaultCPUGenerator()->GetCPUEngine();
+  } else {
+    // NOTE(zhiqiu): create an engine instance every time instead of using
+    // OpDefaultCPUEngine(); this is the legacy behavior of random operators.
+    // The benefit is that when running PE with a fixed seed in multiple
+    // threads, each thread has its own engine, and they do not affect each
+    // other.
+    //
+    // And we need to measure the determinacy of Generator in PE.
+    auto engine = std::make_shared<std::mt19937_64>();
+    if (seed == 0) {
+      seed = GetRandomSeed();
+      VLOG(4) << "Use default random engine with random seed = " << seed;
+    } else {
+      VLOG(4) << "Use default random engine with fixed random seed = " << seed;
+    }
+    static std::mutex mu_;
+    {
+      std::lock_guard<std::mutex> lock(mu_);
+      engine->seed(seed);
+    }
+    return engine;
+  }
+}

-GeneratorState* Generator::GetState() {
-  std::lock_guard<std::mutex> lock(this->mutex);
-  return this->state_.get();
+GeneratorState Generator::GetState() {
+  std::lock_guard<std::mutex> lock(this->mu_);
+  state_.cpu_engine = *engine_;
+  return this->state_;
 }

-void Generator::SetState(GeneratorState* state_in) {
-  std::lock_guard<std::mutex> lock(this->mutex);
-  *this->state_ = *state_in;
+void Generator::SetState(const GeneratorState& state) {
+  std::lock_guard<std::mutex> lock(this->mu_);
+  this->state_ = state;
+  this->engine_ = std::make_shared<std::mt19937_64>(state.cpu_engine);
 }

 uint64_t Generator::GetCurrentSeed() {
-  std::lock_guard<std::mutex> lock(this->mutex);
-  return this->state_->current_seed;
+  std::lock_guard<std::mutex> lock(this->mu_);
+  return this->state_.current_seed;
 }

 uint64_t Generator::Seed() {
-  std::lock_guard<std::mutex> lock(this->mutex);
+  std::lock_guard<std::mutex> lock(this->mu_);
   uint64_t seed;
   std::random_device de;
   seed = ((((uint64_t)de()) << 32) + de()) & 0x1FFFFFFFFFFFFF;
-  this->state_->current_seed = seed;
+  this->state_.current_seed = seed;
   std::seed_seq seq({seed});
-  this->state_->cpu_engine.seed(seq);
+  this->engine_->seed(seq);

-  return this->state_->current_seed;
+  return this->state_.current_seed;
 }

 void Generator::SetCurrentSeed(uint64_t seed) {
-  std::lock_guard<std::mutex> lock(this->mutex);
-  this->state_->current_seed = uint64_t(seed);
+  std::lock_guard<std::mutex> lock(this->mu_);
+  this->state_.current_seed = seed;
   std::seed_seq seq({seed});
-  this->state_->cpu_engine.seed(seq);
+  this->engine_->seed(seq);
 }

-std::mt19937_64& Generator::GetCPUEngine() {
-  std::lock_guard<std::mutex> lock(this->mutex);
-  return this->state_->cpu_engine;
+std::shared_ptr<std::mt19937_64> Generator::GetCPUEngine() {
+  std::lock_guard<std::mutex> lock(this->mu_);
+  return this->engine_;
 }

-void Generator::SetCPUEngine(std::mt19937_64 engine) {
-  std::lock_guard<std::mutex> lock(this->mutex);
-  this->state_->cpu_engine = std::mt19937_64(engine);
+void Generator::SetCPUEngine(std::shared_ptr<std::mt19937_64> engine) {
+  std::lock_guard<std::mutex> lock(this->mu_);
+  this->engine_ = engine;
 }

 uint64_t Generator::Random64() {
-  std::lock_guard<std::mutex> lock(this->mutex);
-  return this->state_->cpu_engine();
+  std::lock_guard<std::mutex> lock(this->mu_);
+  auto engine = this->engine_;
+  return (*engine)();
+}
+
+void Generator::SetIsInitPy(bool is_init_py) {
+  this->is_init_py_ = is_init_py;
+  VLOG(4) << "SetIsInitPy:" << this->is_init_py_;
 }
+bool Generator::GetIsInitPy() const { return this->is_init_py_; }

 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/generator.h b/paddle/fluid/framework/generator.h
index 17870782ba72a..82b35f7ad550e 100644
--- a/paddle/fluid/framework/generator.h
+++ b/paddle/fluid/framework/generator.h
@@ -14,7 +14,9 @@ limitations under the License. */

 #pragma once

+#include <stdint.h>
 #include <atomic>
+
 #include <deque>
 #include <iostream>  // temp for debug
@@ -27,6 +29,12 @@ limitations under the License. */

 namespace paddle {
 namespace framework {

+static uint64_t GetRandomSeed() {
+  std::random_device rd;
+  // double has a 53-bit significand, so limit uint64 to 53 bits
+  return ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
+}
+
 struct GeneratorState {
   int64_t device = -1;
   uint64_t current_seed = 34342423252;
@@ -35,62 +43,67 @@ struct GeneratorState {

 struct Generator {
   Generator() {
-    GeneratorState default_gen_state_cpu;
-    default_gen_state_cpu.device = -1;
-    default_gen_state_cpu.current_seed = 34342423252;
-    std::seed_seq seq({34342423252});
-    default_gen_state_cpu.cpu_engine = std::mt19937_64(seq);
-    this->state_ = std::make_shared<GeneratorState>(default_gen_state_cpu);
+    auto seed = GetRandomSeed();
+    std::seed_seq seq({seed});
+    auto engine = std::make_shared<std::mt19937_64>(seq);
+    this->state_.cpu_engine = *engine;
+    this->state_.device = -1;
+    this->state_.current_seed = seed;
+    this->engine_ = engine;
+    VLOG(4) << "initial seed: " << this->state_.current_seed
+            << ", cpu engine: " << &this->state_.cpu_engine;
+  }
+  explicit Generator(uint64_t seed) {
+    std::seed_seq seq({seed});
+    auto engine = std::make_shared<std::mt19937_64>(seq);
+    this->state_.cpu_engine = *engine;
+    this->state_.device = -1;
+    this->state_.current_seed = seed;
+    this->engine_ = engine;
+    VLOG(4) << "initial seed: " << this->state_.current_seed
+            << ", cpu engine: " << &this->state_.cpu_engine;
+    this->is_init_py_ = true;  // TODO(zhiqiu): remove it in future
   }
-  explicit Generator(GeneratorState state_in)
-      : state_{std::make_shared<GeneratorState>(state_in)} {}
-  Generator(const Generator& other)
-      : Generator(other, std::lock_guard<std::mutex>(other.mutex)) {}
+  Generator(const Generator& other) = delete;

   // get random state
-  GeneratorState* GetState();
+  GeneratorState GetState();
   // set random state
-  void SetState(GeneratorState* state_in);
+  void SetState(const GeneratorState&);
   // get current seed
   uint64_t GetCurrentSeed();
   // random a seed and get
   uint64_t Seed();
-
   // set seed
   void SetCurrentSeed(uint64_t seed);
   // get cpu engine
-  std::mt19937_64& GetCPUEngine();
+  std::shared_ptr<std::mt19937_64> GetCPUEngine();
   // set cpu engine
-  void SetCPUEngine(std::mt19937_64 engine);
+  void SetCPUEngine(std::shared_ptr<std::mt19937_64>);

   uint64_t Random64();

-  bool is_init_py = false;
+  void SetIsInitPy(bool);
+  bool GetIsInitPy() const;

-  // CPU Generator singleton
-  static std::shared_ptr<Generator> GetInstance() {
-    if (NULL == gen_instance_) {
-      gen_instance_.reset(new paddle::framework::Generator());
-    }
-    return gen_instance_;
-  }
+ private:
+  GeneratorState state_;
+  std::shared_ptr<std::mt19937_64> engine_;
+  mutable std::mutex mu_;
+
+  // NOTE(zhiqiu): is_init_py_ is used to make the generator compatible with
+  // the old seeding mechanism, and it should be removed after all
+  // random-related operators and unittests upgrade to use the generator.
+  bool is_init_py_ = false;
+};

-  static std::shared_ptr<Generator> GetInstanceX() {
-    if (NULL == gen_instance_) {
-      gen_instance_.reset(new paddle::framework::Generator());
-    }
-    gen_instance_->is_init_py = true;
-    return gen_instance_;
-  }
+// The DefaultCPUGenerator is used in manual_seed()
+const std::shared_ptr<Generator>& DefaultCPUGenerator();

-  private:
-  static std::shared_ptr<Generator> gen_instance_;
-  std::shared_ptr<GeneratorState> state_;
-  mutable std::mutex mutex;
+// If the op seed is set, or the global generator is not initialized, the
+// OpDefaultCPUEngine is used.
+std::shared_ptr<std::mt19937_64> OpDefaultCPUEngine();

-  Generator(const Generator& other, const std::lock_guard<std::mutex>&)
-      : state_(std::make_shared<GeneratorState>(*(other.state_))) {}
-};
+std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t);

 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/operators/bernoulli_op.cc b/paddle/fluid/operators/bernoulli_op.cc
index c525da5953d76..79c4e2c2bba31 100644
--- a/paddle/fluid/operators/bernoulli_op.cc
+++ b/paddle/fluid/operators/bernoulli_op.cc
@@ -64,11 +64,11 @@ class BernoulliOpKernel
     int64_t size = x->numel();
     std::uniform_real_distribution<T> dist(0.0, 1.0);
-    auto gen_ptr = framework::Generator::GetInstance();
-    std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine();
+    auto gen_ptr = framework::DefaultCPUGenerator();
+    auto engine = gen_ptr->GetCPUEngine();

     for (int64_t i = 0; i < size; ++i) {
-      out_data[i] = BernoulliFunctor(in_data[i], dist(gen_engine));
+      out_data[i] = BernoulliFunctor(in_data[i], dist(*engine));
     }
   }
 };  // namespace operators
diff --git a/paddle/fluid/operators/distributed/large_scale_kv.h b/paddle/fluid/operators/distributed/large_scale_kv.h
index 0d7032e286caa..9e39e68cba779 100644
--- a/paddle/fluid/operators/distributed/large_scale_kv.h
+++ b/paddle/fluid/operators/distributed/large_scale_kv.h
@@ -14,20 +14,19 @@

 #pragma once

+#include <ThreadPool.h>
 #include <functional>
 #include <iostream>
 #include <memory>
 #include <mutex>  // NOLINT
 #include <string>
+#include <thread>  // NOLINT
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
 #include <vector>

-#include <thread>  // NOLINT
-
-#include <ThreadPool.h>
 #include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/rw_lock.h"
@@ -89,26 +88,17 @@ class UniformInitializer : public Initializer {
     min_ = std::stof(attrs[2]);
     max_ = std::stof(attrs[3]);

-    if (seed_ == 0) {
-      seed_ = std::random_device()();
-    }
-
-    random_engine_.seed(seed_);

     dist_ = std::uniform_real_distribution<float>(min_, max_);
+    random_engine_ = framework::GetCPURandomEngine(seed_);
   }

-  float GetValue() override {
-    return framework::Generator::GetInstance()->is_init_py
-               ? dist_(framework::Generator::GetInstance()->GetCPUEngine())
-               : dist_(random_engine_);
-    // return dist_(random_engine_);
-  }
+  float GetValue() override { return dist_(*random_engine_); }

  private:
   float min_;
   float max_;

-  std::minstd_rand random_engine_;
+  std::shared_ptr<std::mt19937_64> random_engine_;

   std::uniform_real_distribution<float> dist_;
 };

@@ -139,26 +129,18 @@ class GaussianInitializer : public Initializer {
     mean_ = std::stof(attrs[2]);
     std_ = std::stof(attrs[3]);

-    if (seed_ == 0) {
-      seed_ = std::random_device()();
-    }
+    random_engine_ = framework::GetCPURandomEngine(seed_);

-    random_engine_.seed(seed_);
     dist_ = std::normal_distribution<float>(mean_, std_);
   }

-  float GetValue() override {
-    return framework::Generator::GetInstance()->is_init_py
-               ? dist_(framework::Generator::GetInstance()->GetCPUEngine())
-               : dist_(random_engine_);
-    // return dist_(random_engine_);
-  }
+  float GetValue() override { return dist_(*random_engine_); }

  private:
   float std_;
   float mean_;

-  std::minstd_rand random_engine_;
+  std::shared_ptr<std::mt19937_64> random_engine_;

   std::normal_distribution<float> dist_;
 };
diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h
index bce4c7ca19a60..9d9eb4a82a075 100644
--- a/paddle/fluid/operators/dropout_op.h
+++ b/paddle/fluid/operators/dropout_op.h
@@ -55,30 +55,22 @@ class CPUDropoutKernel : public framework::OpKernel<T> {
       std::memset(mask_data, 0, size * sizeof(*mask_data));  // NOLINT
       return;
     }
-
-    bool init_generator_py = framework::Generator::GetInstance()->is_init_py;
-
+    // std::minstd_rand engine;
     // NOTE: fixed seed should only be used in unittest or for debug.
     // Guarantee to use random seed in training.
-    std::random_device rnd;
-    std::minstd_rand engine;
-    int seed_data;
+    int seed_data = 0;
     if (seed) {
       seed_data = *(seed->data<int>());
     } else {
       seed_data =
-          context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
+          context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : 0;
     }
-    engine.seed(seed_data);
+    auto engine = framework::GetCPURandomEngine(seed_data);

     std::uniform_real_distribution<float> dist(0, 1);

     for (size_t i = 0; i < size; ++i) {
-      float cur_random =
-          init_generator_py
-              ? dist(framework::Generator::GetInstance()->GetCPUEngine())
-              : dist(engine);
-      if (cur_random < dropout_prob) {
+      if (dist(*engine) < dropout_prob) {
         mask_data[i] = 0;
         y_data[i] = 0;
       } else {
diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc
index 111d4ad449007..4f128463375b9 100644
--- a/paddle/fluid/operators/gaussian_random_op.cc
+++ b/paddle/fluid/operators/gaussian_random_op.cc
@@ -39,26 +39,14 @@ class CPUGaussianRandomKernel : public framework::OpKernel<T> {
     tensor->Resize(shape);
     int64_t size = tensor->numel();
     T* data = tensor->mutable_data<T>(context.GetPlace());
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
+    auto engine = framework::GetCPURandomEngine(seed);

-    if (framework::Generator::GetInstance()->is_init_py) {
-      std::mt19937_64& gen_engine =
-          framework::Generator::GetInstance()->GetCPUEngine();
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = dist(gen_engine);
-      }
-    } else {
-      unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-      std::minstd_rand engine;
-      if (seed == 0) {
-        seed = std::random_device()();
-      }
-      engine.seed(seed);
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = dist(engine);
-      }
+    for (int64_t i = 0; i < size; ++i) {
+      data[i] = dist(*engine);
     }
   }
-};
+};  // namespace operators

 template <typename T>
 class CPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel<T> {
diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc
index 86feaa72d5fa6..a4bdc923eecc3 100644
--- a/paddle/fluid/operators/math/sampler.cc
+++ b/paddle/fluid/operators/math/sampler.cc
@@ -13,11 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/math/sampler.h"
+
 #include <glog/logging.h>
+
 #include <iostream>
 #include <queue>
 #include <sstream>
 #include <vector>
+
 #include "paddle/fluid/framework/generator.h"

 namespace paddle {
@@ -28,22 +31,17 @@ Sampler::~Sampler() {}

 UniformSampler::UniformSampler(int64_t range, unsigned int seed)
     : Sampler(range, seed), inv_range_(1.0 / (range + 1)) {
-  random_engine_ = std::make_shared<std::mt19937>(seed_);
+  random_engine_ = framework::GetCPURandomEngine(seed_);
   dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
 }

-int64_t UniformSampler::Sample() const {
-  return framework::Generator::GetInstance()->is_init_py
-             ? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
-             : (*dist_)(*random_engine_);
-  // return (*dist_)(*random_engine_);
-}
+int64_t UniformSampler::Sample() const { return (*dist_)(*random_engine_); }

 float UniformSampler::Probability(int64_t value) const { return inv_range_; }

 LogUniformSampler::LogUniformSampler(int64_t range, unsigned int seed)
     : Sampler(range, seed), log_range_(log(range + 1)) {
-  random_engine_ = std::make_shared<std::mt19937>(seed_);
+  random_engine_ = framework::GetCPURandomEngine(seed_);
   dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
 }

@@ -52,10 +50,7 @@ int64_t LogUniformSampler::Sample() const {
   // inverse_transform_sampling method
   // More details:
   // https://wanghaoshuang.github.io/2017/11/Log-uniform-distribution-sampler/
-  auto cur_random =
-      framework::Generator::GetInstance()->is_init_py
-          ? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
-          : (*dist_)(*random_engine_);
+  auto cur_random = (*dist_)(*random_engine_);
   const int64_t value = static_cast<int64_t>(exp(cur_random * log_range_)) - 1;
   // Mathematically, value should be <= range_, but might not be due to some
   // floating point roundoff, so we mod by range_.
@@ -74,7 +69,7 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities,
                              const int *alias, const float *alias_probabilities,
                              unsigned int seed)
     : Sampler(range, seed) {
-  random_engine_ = std::make_shared<std::mt19937>(seed_);
+  random_engine_ = framework::GetCPURandomEngine(seed_);
   real_dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
   int_dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
@@ -84,14 +79,8 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities,
 }

 int64_t CustomSampler::Sample() const {
-  auto index =
-      framework::Generator::GetInstance()->is_init_py
-          ? (*int_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
-          : (*int_dist_)(*random_engine_);
-  auto p =
-      framework::Generator::GetInstance()->is_init_py
-          ? (*real_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
-          : (*real_dist_)(*random_engine_);
+  auto index = (*int_dist_)(*random_engine_);
+  auto p = (*real_dist_)(*random_engine_);

   if (p > alias_probs_[index]) {
     int alias = alias_[index];
diff --git a/paddle/fluid/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h
index 3fa5a7ae336a9..480576ef9dc8c 100644
--- a/paddle/fluid/operators/math/sampler.h
+++ b/paddle/fluid/operators/math/sampler.h
@@ -26,8 +26,8 @@ namespace math {

 // TODO(wanghaoshuang): Support for GPU
 /**
-* Sample integers from [0, range).
-*/
+ * Sample integers from [0, range).
+ */
 class Sampler {
  public:
   explicit Sampler(int64_t range, unsigned int seed = 0UL) : range_(range) {
@@ -117,7 +117,7 @@ class CustomSampler : public Sampler {
   const int* alias_;
   const float* probs_;
   const int exceptional_val = -1;
-  std::shared_ptr<std::mt19937> random_engine_;
+  std::shared_ptr<std::mt19937_64> random_engine_;
   std::shared_ptr<std::uniform_real_distribution<>> real_dist_;
   std::shared_ptr<std::uniform_int_distribution<>> int_dist_;
 };
diff --git a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
index d0ecca78ae8b2..98200caca8cf6 100644
--- a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include <string>
+
 #include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/fill_constant_op.h"
 #include "paddle/fluid/operators/mean_op.h"
@@ -35,23 +36,11 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
     T* data = tensor->mutable_data<T>(context.GetPlace());
     int64_t size = tensor->numel();
     std::normal_distribution<T> dist(mean, std);
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
+    auto engine = framework::GetCPURandomEngine(seed);

-    if (framework::Generator::GetInstance()->is_init_py) {
-      std::mt19937_64& gen_engine =
-          framework::Generator::GetInstance()->GetCPUEngine();
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = dist(gen_engine);
-      }
-    } else {
-      unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-      std::minstd_rand engine;
-      if (seed == 0) {
-        seed = std::random_device()();
-      }
-      engine.seed(seed);
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = dist(engine);
-      }
+    for (int64_t i = 0; i < size; ++i) {
+      data[i] = dist(*engine);
     }

     tensor->set_layout(DataLayout::kMKLDNN);
diff --git a/paddle/fluid/operators/randint_op.cc b/paddle/fluid/operators/randint_op.cc
index 662fe3bcb3b3b..b3a2e14331955 100644
--- a/paddle/fluid/operators/randint_op.cc
+++ b/paddle/fluid/operators/randint_op.cc
@@ -46,22 +46,11 @@ class CPURandintKernel : public framework::OpKernel<T> {

     std::uniform_int_distribution<T> dist(ctx.Attr<int>("low"),
                                           ctx.Attr<int>("high") - 1);
+    unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
+    auto engine = framework::GetCPURandomEngine(seed);

-    if (framework::Generator::GetInstance()->is_init_py) {
-      std::mt19937_64& gen_engine =
-          framework::Generator::GetInstance()->GetCPUEngine();
-      for (int64_t i = 0; i < size; ++i) data[i] = dist(gen_engine);
-    } else {
-      unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
-      std::minstd_rand engine;
-      if (seed == 0) {
-        seed = std::random_device()();
-      }
-      engine.seed(seed);
-
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = dist(engine);
-      }
+    for (int64_t i = 0; i < size; ++i) {
+      data[i] = dist(*engine);
     }
   }
 };
diff --git a/paddle/fluid/operators/randperm_op.h b/paddle/fluid/operators/randperm_op.h
index 0eb028ad80684..02aabb9a7b569 100644
--- a/paddle/fluid/operators/randperm_op.h
+++ b/paddle/fluid/operators/randperm_op.h
@@ -19,6 +19,7 @@ limitations under the License. */
 #include <algorithm>
 #include <ctime>
 #include <string>
+
 #include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/tensor_util.h"
@@ -29,20 +30,12 @@ namespace operators {

 template <typename T>
 static inline void random_permate(T* data_ptr, int num, unsigned int seed) {
+  auto engine = framework::GetCPURandomEngine(seed);
   for (int i = 0; i < num; ++i) {
     data_ptr[i] = static_cast<T>(i);
   }
-  if (framework::Generator::GetInstance()->is_init_py) {
-    std::shuffle(data_ptr, data_ptr + num,
-                 framework::Generator::GetInstance()->GetCPUEngine());
-  } else {
-    if (seed == 0) {
-      seed = std::random_device()();
-    }
-    std::srand(seed);
-    std::random_shuffle(data_ptr, data_ptr + num);
-  }
+  std::shuffle(data_ptr, data_ptr + num, *engine);
 }

 template <typename T>
diff --git a/paddle/fluid/operators/sampling_id_op.h b/paddle/fluid/operators/sampling_id_op.h
index a09220b1ccd13..9bec08f593afe 100644
--- a/paddle/fluid/operators/sampling_id_op.h
+++ b/paddle/fluid/operators/sampling_id_op.h
@@ -51,20 +51,15 @@ class SamplingIdKernel : public framework::OpKernel<T> {
     framework::TensorToVector(*input, context.device_context(), &ins_vector);

     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    std::minstd_rand engine;
-    if (seed == 0) {
-      seed = std::random_device()();
-    }
-    engine.seed(seed);
+
     std::uniform_real_distribution<T> dist(
         static_cast<T>(context.Attr<float>("min")),
         static_cast<T>(context.Attr<float>("max")));
+    auto engine = framework::GetCPURandomEngine(seed);

     std::vector<int64_t> ids(batch_size);
     for (int i = 0; i < batch_size; ++i) {
-      T r = framework::Generator::GetInstance()->is_init_py
-                ? dist(framework::Generator::GetInstance()->GetCPUEngine())
-                : dist(engine);
+      T r = dist(*engine);
       int idx = width - 1;
       for (int j = 0; j < width; ++j) {
         if ((r -= ins_vector[i * width + j]) < 0) {
diff --git a/paddle/fluid/operators/truncated_gaussian_random_op.cc b/paddle/fluid/operators/truncated_gaussian_random_op.cc
index 3aa9ff544af63..419f0f7a2a578 100644
--- a/paddle/fluid/operators/truncated_gaussian_random_op.cc
+++ b/paddle/fluid/operators/truncated_gaussian_random_op.cc
@@ -14,6 +14,7 @@ limitations under the License. */

 #include <limits>
 #include <random>
+
 #include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"

@@ -167,22 +168,10 @@ class CPUTruncatedGaussianRandomKernel : public framework::OpKernel<T> {
     TruncatedNormal<T> truncated_normal(mean, std);
     int64_t size = tensor->numel();

-    if (framework::Generator::GetInstance()->is_init_py) {
-      std::mt19937_64& gen_engine =
-          framework::Generator::GetInstance()->GetCPUEngine();
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = truncated_normal(dist(gen_engine));
-      }
-    } else {
-      unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-      std::minstd_rand engine;
-      if (seed == 0) {
-        seed = std::random_device()();
-      }
-      engine.seed(seed);
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = truncated_normal(dist(engine));
-      }
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
+    auto engine = framework::GetCPURandomEngine(seed);
+    for (int64_t i = 0; i < size; ++i) {
+      data[i] = truncated_normal(dist(*engine));
     }
   }
 };
diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc
index a4487cde27799..9cffe09a33abf 100644
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/uniform_random_op.h"
+
 #include <string>
+
 #include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
@@ -62,34 +64,12 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
     std::uniform_real_distribution<T> dist(
         static_cast<T>(ctx.Attr<float>("min")),
         static_cast<T>(ctx.Attr<float>("max")));
-    auto gen_ptr = framework::Generator::GetInstance();
-    if (gen_ptr->is_init_py) {
-      std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine();
-      // auto gen_engine = gen_ptr_->GetCPUEngine();
-      // std::uniform_real_distribution<T> dist(
-      //     static_cast<T>(ctx.Attr<float>("min")),
-      //     static_cast<T>(ctx.Attr<float>("max")));
+    unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
+    auto engine = framework::GetCPURandomEngine(seed);

-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = dist(gen_engine);
-      }
-    } else {
-      unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
-      std::minstd_rand engine;
-      if (seed == 0) {
-        seed = std::random_device()();
-      }
-      engine.seed(seed);
-      // std::uniform_real_distribution<T> dist(
-      //     static_cast<T>(ctx.Attr<float>("min")),
-      //     static_cast<T>(ctx.Attr<float>("max")));
-      // int64_t size = tensor->numel();
-      for (int64_t i = 0; i < size; ++i) {
-        data[i] = dist(engine);
-      }
+    for (int64_t i = 0; i < size; ++i) {
+      data[i] = dist(*engine);
     }
-    // std::mt19937_64 &engine = gen_ptr->GetCPUEngine();
-    // auto engine = gen_ptr_->GetCPUEngine();

     unsigned int diag_num =
         static_cast<unsigned int>(ctx.Attr<int>("diag_num"));
@@ -139,12 +119,12 @@ class UniformRandomOp : public framework::OperatorWithKernel {

     if (ctx->HasInputs("ShapeTensorList")) {
       // top prority shape
       auto inputs_name = ctx->Inputs("ShapeTensorList");
-      PADDLE_ENFORCE_GT(
-          inputs_name.size(), 0,
-          platform::errors::InvalidArgument(
-              "Input(ShapeTensorList)'size of Op(uniform_random) can't be zero."
-              "Please check the Attr(shape)'s size of"
-              "Op(fluid.layers.uniform_random).)"));
+      PADDLE_ENFORCE_GT(inputs_name.size(), 0,
+                        platform::errors::InvalidArgument(
+                            "Input(ShapeTensorList)'size of "
+                            "Op(uniform_random) can't be zero."
+                            "Please check the Attr(shape)'s size of"
+                            "Op(fluid.layers.uniform_random).)"));
       auto out_dims = std::vector<int>(inputs_name.size(), -1);
       ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu
index c024bb87b09c0..4df1e0ffeb975 100644
--- a/paddle/fluid/operators/uniform_random_op.cu
+++ b/paddle/fluid/operators/uniform_random_op.cu
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <thrust/random.h>
 #include <thrust/transform.h>
+
 #include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
@@ -88,15 +89,12 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
     }
     T* data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    if (framework::Generator::GetInstance()->is_init_py) {
-      seed = static_cast<unsigned int>(
-          framework::Generator::GetInstance()->GetCurrentSeed());
-    } else {
-      if (seed == 0) {
-        std::random_device rd;
-        seed = rd();
-      }
+
+    if (seed == 0) {
+      std::random_device rd;
+      seed = rd();
     }
+
     T min = static_cast<T>(context.Attr<float>("min"));
     T max = static_cast<T>(context.Attr<float>("max"));
     unsigned int diag_num =
diff --git a/paddle/fluid/pybind/generator_py.cc b/paddle/fluid/pybind/generator_py.cc
index 3bccd5fb2dd92..90b7f50105253 100644
--- a/paddle/fluid/pybind/generator_py.cc
+++ b/paddle/fluid/pybind/generator_py.cc
@@ -29,23 +29,36 @@ namespace py = pybind11;

 namespace paddle {
 namespace pybind {
-void BindGenerator(py::module* m) {
-  py::class_<framework::GeneratorState>(*m, "GeneratorState", "");
-  py::class_<std::mt19937_64>(*m, "mt19937_64", "");
+void BindGenerator(py::module* m_ptr) {
+  auto& m = *m_ptr;
+  py::class_<framework::GeneratorState,
+             std::shared_ptr<framework::GeneratorState>>(m, "GeneratorState")
+      .def("current_seed",
+           [](std::shared_ptr<framework::GeneratorState>& self) {
+             return self->current_seed;
+           });
+  py::class_<std::mt19937_64>(m, "mt19937_64", "");
   py::class_<framework::Generator, std::shared_ptr<framework::Generator>>(
-      *m, "Generator")
-      .def(py::init([]() { return framework::Generator::GetInstanceX(); }),
-           py::return_value_policy::reference)
-      .def("get_state", &framework::Generator::GetState,
-           py::return_value_policy::move)
+      m, "Generator")
+      .def("__init__",
+           [](framework::Generator& self) {
+             new (&self) framework::Generator();
+           })
+      .def("get_state", &framework::Generator::GetState)
       .def("set_state", &framework::Generator::SetState)
-      .def("manual_seed", &framework::Generator::SetCurrentSeed)
+      .def("manual_seed",
+           [](std::shared_ptr<framework::Generator>& self, uint64_t seed) {
+             self->SetCurrentSeed(seed);
+             return self;
+           })
       .def("seed", &framework::Generator::Seed)
      .def("initial_seed", &framework::Generator::GetCurrentSeed)
      .def("random", &framework::Generator::Random64)
-      .def("get_cpu_engine", &framework::Generator::GetCPUEngine,
-           py::return_value_policy::move)
-      .def("set_cpu_engine", &framework::Generator::SetCPUEngine);
+      //  .def("get_cpu_engine", &framework::Generator::GetCPUEngine)
+      //  .def("set_cpu_engine", &framework::Generator::SetCPUEngine)
+      .def_property("_is_init_py", &framework::Generator::GetIsInitPy,
+                    &framework::Generator::SetIsInitPy);
+  m.def("default_cpu_generator", &framework::DefaultCPUGenerator);
 }  // end Generator
 }  // end namespace pybind
-}  // end namespace paddle
+}  // namespace paddle
diff --git a/python/paddle/dataset/tests/test_sentiment.py b/python/paddle/dataset/tests/test_sentiment.py
index bb9830132e987..3540ea06b075e 100644
--- a/python/paddle/dataset/tests/test_sentiment.py
+++ b/python/paddle/dataset/tests/test_sentiment.py
@@ -42,9 +42,11 @@ def test_sort_files(self):
     def test_data_set(self):
         data_set = st.load_sentiment_data()
         last_label = -1
+
         for each in st.test():
             self.assertNotEqual(each[1], last_label)
             last_label = each[1]
+
         self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES)
         self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES)
         self.assertEqual(
diff --git a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py
index 2b331308de5ee..a5f08ca969ac4 100644
--- a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py
+++ b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py
@@ -92,9 +92,11 @@ def run_program(self, place, feed_list):
         return param_sum

     def check_weight_decay(self, place, model):
+        paddle.manual_seed(1)
+        paddle.framework.random._manual_program_seed(1)
         main_prog = fluid.framework.Program()
         startup_prog = fluid.framework.Program()
-        startup_prog.random_seed = 1
+
         with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
             data = fluid.layers.data(
                 name="words", shape=[1], dtype="int64", lod_level=1)
@@ -113,9 +115,11 @@ def check_weight_decay(self, place, model):
         return param_sum

     def check_weight_decay2(self, place, model):
+        paddle.manual_seed(1)
+        paddle.framework.random._manual_program_seed(1)
         main_prog = fluid.framework.Program()
         startup_prog = fluid.framework.Program()
-        startup_prog.random_seed = 1
+
        with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
            data = fluid.layers.data(
                name="words", shape=[1], dtype="int64", lod_level=1)
diff --git a/python/paddle/fluid/generator.py b/python/paddle/fluid/generator.py
index e11b2e484dce1..98924f801413b 100644
--- a/python/paddle/fluid/generator.py
+++ b/python/paddle/fluid/generator.py
@@ -17,44 +17,28 @@

 __all__ = ['Generator']

-default_rng_seed_val = 34342423252
-

-class Generator(object):
+class Generator(core.Generator):
     """Generator class"""

-    def __init__(self, device="CPU"):
-        """init"""
-        self.device = device
-        seed_in = default_rng_seed_val
-        if self.device == "CPU":
-            self.generator = core.Generator()
-            # self.generator.manual_seed(seed_in)
-        else:
-            raise ValueError(
-                "generator class with device %s does not exist, currently only support generator with device 'CPU' "
-                % device)
-
-    def get_state(self):
-        return self.generator.get_state()
-
-    def set_state(self, state):
-        self.generator.set_state(state)
+    def __init__(self, place=None):
+        """
+        Create a generator object which manages the random number generation. (Experimental Feature)

-    def manual_seed(self, seed):
-        self.generator.manual_seed(seed)
+        Parameters:
+            place(CPUPlace|CUDAPinnedPlace|CUDAPlace, optional): The place to allocate the Tensor. Can be
+                CPUPlace, CUDAPinnedPlace, or CUDAPlace. Default: None, which means the global place.

-    def seed(self):
-        return self.generator.seed()
+        Returns:
+            Generator: A generator object.

-    def initial_seed(self):
-        return self.generator.initial_seed()
-
-    def random(self):
-        return self.generator.random()
-
-    def get_cpu_engine(self):
-        return self.generator.get_cpu_engine()
-
-    def set_cpu_engine(self, cpu_engine):
-        self.generator.set_cpu_engine(cpu_engine)
+        """
+        self.place = place
+        if not place:
+            place = core.CPUPlace()
+        if isinstance(place, core.CPUPlace):
+            super(Generator, self).__init__()
+        else:
+            raise ValueError(
+                "Generator class with %s is not supported yet; currently it only supports CPUPlace"
+                % place)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
index dd58a49bb55c2..af7e73c41464d 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
@@ -15,6 +15,7 @@
 import math
 import numpy as np
 import unittest
+import paddle
 from paddle.jit import to_static
 import paddle.fluid as fluid
 from paddle.fluid import ParamAttr
@@ -560,8 +561,8 @@ def train_bmn(args, place, to_static):
     loss_data = []

     with fluid.dygraph.guard(place):
-        fluid.default_main_program().random_seed = SEED
-        fluid.default_startup_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)
         global local_random
         local_random = np.random.RandomState(SEED)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py
index 0e2bac9fa5b5c..4d735b565ddbc 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py
@@ -21,6 +21,7 @@
 import os
 os.environ["CUDA_VISIBLE_DEVICES"] = "2"

+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import to_variable
 from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
@@ -448,8 +449,8 @@ def do_train(args, to_static):
     place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
     ) else fluid.CPUPlace()
     with fluid.dygraph.guard(place):
-        fluid.default_startup_program().random_seed = SEED
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)
         reader = get_random_input_data(args.batch_size, args.vocab_size,
                                        args.num_labels)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
index 5ec3de5871dd6..a377075062b26 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
@@ -14,6 +14,7 @@

 import time
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.initializer import MSRA
 from paddle.fluid.param_attr import ParamAttr
@@ -447,8 +448,8 @@ def train_mobilenet(args, to_static):
     with fluid.dygraph.guard(args.place):
         np.random.seed(SEED)
-        fluid.default_startup_program().random_seed = SEED
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)

         if args.model == "MobileNetV1":
             net = MobileNetV1(class_dim=args.class_dim, scale=1.0)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py
index 790319936ac01..df2b69297bb4d 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py
@@ -19,7 +19,7 @@
 import unittest

 import numpy as np
-
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
 from paddle.fluid.dygraph.base import to_variable
@@ -218,8 +218,8 @@ def train(place):
     batch_num = 200

     with fluid.dygraph.guard(place):
-        fluid.default_startup_program().random_seed = SEED
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)
         ptb_model = PtbModel(
             hidden_size=hidden_size,
             vocab_size=vocab_size,
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
index 4813930159744..1d211197ebd48 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
@@ -16,6 +16,7 @@
 import math
 import itertools
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.dygraph.nn as nn
 from paddle.fluid.dygraph import to_variable, Layer
@@ -64,8 +65,8 @@ def train(args, place, to_static):
     env.seed(SEED)

     with fluid.dygraph.guard(place):
-        fluid.default_main_program().random_seed = SEED
-        fluid.default_startup_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)
         local_random = np.random.RandomState(SEED)

         policy = Policy()
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
index 46eb2b42e9265..6556b2f03bd53 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
@@ -215,8 +215,8 @@ def train(to_static):
     """
     with fluid.dygraph.guard(place):
         np.random.seed(SEED)
-        fluid.default_startup_program().random_seed = SEED
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)

         train_reader = paddle.batch(
             reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py
index 30cba78fec19c..38e4d5ad5480b 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py
@@ -331,8 +331,8 @@ def train(train_reader, to_static):
     np.random.seed(SEED)

     with fluid.dygraph.guard(place):
-        fluid.default_startup_program().random_seed = SEED
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)
         se_resnext = SeResNeXt()
         optimizer = optimizer_setting(train_parameters,
                                       se_resnext.parameters())
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py
index fd5a58be26be4..2aa3396fb7f85 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py
@@ -15,6 +15,7 @@
 import unittest

 import numpy as np
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.nn import Conv2D, Linear, Embedding
 from paddle.fluid.dygraph import to_variable, ProgramTranslator, declarative
@@ -285,8 +286,8 @@ def train(args, to_static):

     with fluid.dygraph.guard(place):
         np.random.seed(SEED)
-        fluid.default_startup_program().random_seed = SEED
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)

         train_reader = fake_data_reader(args.class_num, args.vocab_size,
                                         args.batch_size, args.padding_size)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py
index 552a6307f3337..14b9ac2e99584 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_simnet.py
@@ -108,8 +108,8 @@ def train(conf_dict, to_static):
     place = fluid.CPUPlace()

     with fluid.dygraph.guard(place):
-        fluid.default_startup_program().random_seed = SEED
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)

         conf_dict['dict_size'] = len(vocab)
         conf_dict['seq_len'] = args.seq_len
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py
index 7aa465949eb70..4fc8d27d30cb8 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py
@@ -18,6 +18,7 @@
 import unittest

 import numpy as np
+import paddle
 import paddle.fluid as fluid

 import transformer_util as util
@@ -31,10 +32,11 @@

 def train_static(args, batch_generator):
+    paddle.manual_seed(SEED)
+    paddle.framework.random._manual_program_seed(SEED)
     train_prog = fluid.Program()
     startup_prog = fluid.Program()
-    train_prog.random_seed = SEED
-    startup_prog.random_seed = SEED
+
     with fluid.program_guard(train_prog, startup_prog):
         with fluid.unique_name.guard():
             # define input and reader
@@ -128,8 +130,8 @@ def train_static(args, batch_generator):
 def train_dygraph(args, batch_generator):
     with fluid.dygraph.guard(place):
         if SEED is not None:
-            fluid.default_main_program().random_seed = SEED
-            fluid.default_startup_program().random_seed = SEED
+            paddle.manual_seed(SEED)
+            paddle.framework.random._manual_program_seed(SEED)
         # define data loader
         train_loader = fluid.io.DataLoader.from_generator(capacity=10)
         train_loader.set_batch_generator(batch_generator, places=place)
@@ -220,7 +222,8 @@ def train_dygraph(args, batch_generator):
 def predict_dygraph(args, batch_generator):
     with fluid.dygraph.guard(place):
-        fluid.default_main_program().random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)

         # define data loader
         test_loader = fluid.io.DataLoader.from_generator(capacity=10)
@@ -291,7 +294,8 @@ def predict_dygraph(args, batch_generator):
 def predict_static(args, batch_generator):
     test_prog = fluid.Program()
     with fluid.program_guard(test_prog):
-        test_prog.random_seed = SEED
+        paddle.manual_seed(SEED)
+        paddle.framework.random._manual_program_seed(SEED)

         # define input and reader
         input_field_names = util.encoder_data_input_fields + util.fast_decoder_data_input_fields
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py
index 13a97fb7478db..bedca412157f0 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py
@@ -20,7 +20,7 @@
 import sys
 import time
 import unittest
-
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import declarative, ProgramTranslator, to_variable
 from paddle.fluid.dygraph.nn import Conv2D, BatchNorm, Linear, Pool2D
@@ -272,8 +272,8 @@ def train(args, fake_data_reader, to_static):
     random.seed(0)
     np.random.seed(0)
     with fluid.dygraph.guard(place):
-        fluid.default_startup_program().random_seed = 1000
-        fluid.default_main_program().random_seed = 1000
+        paddle.manual_seed(1000)
+        paddle.framework.random._manual_program_seed(1000)

         video_model = TSM_ResNet("TSM", train_config, 'Train')
diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
index ec6b81f138321..9c3ed13cbb000 100644
--- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
+++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
@@ -17,6 +17,7 @@
 import multiprocessing
 import os
 import unittest
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import compiler
@@ -64,10 +65,11 @@ def run_executor(exe, binary, feed, fetch_list):
             feed_data_reader, FeedDataReader
         ), "feed_data_reader must be type of FeedDataReader"

+        paddle.manual_seed(1)
+        paddle.framework.random._manual_program_seed(1)
         main = fluid.Program()
         startup = fluid.Program()
-        startup.random_seed = 1
-        main.random_seed = 1
+
         with fluid.program_guard(main, startup):
             feed_dict, loss = cls.build_model(feed_dict, get_data_from_feeder,
                                               main, method, optimizer)
diff --git a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py
index 671efd8c72155..43d485a0a6d24 100644
--- a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.framework import Parameter
 import numpy as np
@@ -44,10 +45,10 @@ def setUp(self):

     def build_program_and_scope(self):
         self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
+        paddle.manual_seed(1)
+        paddle.framework.random._manual_program_seed(1)
         startup_program = fluid.Program()
         main_program = fluid.Program()
-        startup_program.random_seed = 1
-        main_program.random_seed = 1

         scope = fluid.Scope()
         with fluid.program_guard(main_program, startup_program):
diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py
index 8430f39578047..751fed2e56126 100644
--- a/python/paddle/fluid/tests/unittests/test_compiled_program.py
+++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py
@@ -16,6 +16,7 @@

 import unittest
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from test_imperative_base import new_program_scope
@@ -29,8 +30,8 @@ def setUp(self):
         self.label = np.random.randint(
             low=0, high=10, size=[16, 1], dtype=np.int64)
         with new_program_scope():
-            fluid.default_startup_program().random_seed = self.seed
-            fluid.default_main_program().random_seed = self.seed
+            paddle.manual_seed(self.seed)
+            paddle.framework.random._manual_program_seed(self.seed)
             place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
             ) else fluid.CPUPlace()
             exe = fluid.Executor(place)
@@ -46,8 +47,8 @@ def setUp(self):

     def test_compiled_program_base(self):
         with new_program_scope():
-            fluid.default_startup_program().random_seed = self.seed
-            fluid.default_main_program().random_seed = self.seed
+            paddle.manual_seed(self.seed)
+            paddle.framework.random._manual_program_seed(self.seed)
             place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
             ) else fluid.CPUPlace()
             exe = fluid.Executor(place)
@@ -64,8 +65,8 @@ def test_compiled_program_base(self):

     def test_compiled_program_with_data_parallel(self):
         with new_program_scope():
-            fluid.default_startup_program().random_seed = self.seed
-            fluid.default_main_program().random_seed = self.seed
+            paddle.manual_seed(self.seed)
+            paddle.framework.random._manual_program_seed(self.seed)
             place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
             ) else fluid.CPUPlace()
             exe = fluid.Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py
index a16f21c0f97c0..cc0f3745bbf7b 100644
--- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py
+++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py
@@ -34,10 +34,10 @@ def random_reader():

 def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
+    paddle.manual_seed(1)
+    paddle.framework.random._manual_program_seed(1)
     startup_prog = fluid.Program()
     main_prog = fluid.Program()
-    startup_prog.random_seed = 1
-    main_prog.random_seed = 1

     with fluid.unique_name.guard():
         with fluid.program_guard(main_prog, startup_prog):
diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py
index ae4355ec412c8..88b496c1d89e6 100644
--- a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py
+++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py
@@ -27,6 +27,8 @@
 from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope

+SEED = 123123111
+

 class SimpleImgConvPool(fluid.dygraph.Layer):
     def __init__(self,
@@ -105,12 +107,11 @@ def forward(self, inputs):

 class TestDygraphMultiForward(unittest.TestCase):
     def test_mnist_forward_float32(self):
-        seed = 90
         epoch_num = 1
-        with fluid.dygraph.guard():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
+
+        with fluid.dygraph.guard():
+            paddle.manual_seed(SEED)
+            paddle.framework.random._manual_program_seed(SEED)
             mnist = MNIST()
             sgd = SGDOptimizer(
                 learning_rate=1e-3, parameter_list=mnist.parameters())
@@ -142,9 +143,8 @@ def test_mnist_forward_float32(self):
                     dy_param_init_value[param.name] = param.numpy()

         with new_program_scope():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
-
+            paddle.manual_seed(SEED)
+            paddle.framework.random._manual_program_seed(SEED)
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py
index 6c0bb97bf6f14..e0c0277270b40 100644
--- a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py
@@ -18,6 +18,7 @@
 import unittest
 import numpy as np
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.layers as layers
@@ -465,9 +466,9 @@ def set_customed_config(self):
         pass

     def _prepare_program(self, config, parallel=True):
+        paddle.manual_seed(config.random_seed)
         self.main_program = fluid.Program()
         self.startup_program = fluid.Program()
-        self.startup_program.random_seed = config.random_seed
         with fluid.program_guard(self.main_program, self.startup_program):
             with fluid.unique_name.guard():
                 res_vars = lm_model(
diff --git a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py
index 5a562dc14650a..c18b7c5b044e7 100644
--- a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py
+++ b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py
@@ -13,6 +13,7 @@
 # limitations under the License.

 import numpy as np
+import paddle
 import paddle.fluid as fluid
 import six
 import unittest
@@ -37,13 +38,13 @@ def test_check_grad(self):
         self.assertTrue(np.array_equal(grad_value1, grad_value2))

     def run_program(self, place, stop_gradient=False):
+        np.random.seed(1)
+        paddle.manual_seed(1)
+        paddle.framework.random._manual_program_seed(1)
+
         startup_program = fluid.Program()
         main_program = fluid.Program()

-        np.random.seed(1)
-        startup_program.random_seed = 1
-        main_program.random_seed = 1
-
         scope = fluid.Scope()
         with fluid.program_guard(main_program, startup_program):
             with fluid.scope_guard(scope):
diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py
index e5a7e6c702aec..ec30cb70c5790 100644
--- a/python/paddle/fluid/tests/unittests/test_fc_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fc_op.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 import unittest
+import paddle
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
@@ -135,31 +136,34 @@ def config(self):

 class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
     def test_api(self):
-        startup_program = Program()
-        main_program = Program()
-        startup_program.random_seed = SEED
-        main_program.random_seed = SEED
-
-        with program_guard(main_program, startup_program):
-            input = np.random.random([2, 2, 25]).astype("float32")
-            x = fluid.layers.data(
-                name="x",
-                shape=[2, 2, 25],
-                append_batch_size=False,
-                dtype="float32")
-
-            out_1 = fluid.layers.fc(input=x, size=1, num_flatten_dims=-1)
-            out_2 = fluid.layers.fc(input=x, size=1, num_flatten_dims=2)
-
-        place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
-        ) else fluid.CUDAPlace(0)
-        exe = fluid.Executor(place=place)
-        exe.run(startup_program)
-        res_1, res_2 = exe.run(main_program,
-                               feed={"x": input},
-                               fetch_list=[out_1, out_2])
-
-        assert np.array_equal(res_1, res_2)
+        def run_program(num_flatten_dims):
+            paddle.manual_seed(SEED)
+            np.random.seed(SEED)
+            startup_program = Program()
+            main_program = Program()
+
+            with program_guard(main_program, startup_program):
+                input = np.random.random([2, 2, 25]).astype("float32")
+                x = fluid.layers.data(
+                    name="x",
+                    shape=[2, 2, 25],
+                    append_batch_size=False,
+                    dtype="float32")
+
+                out = fluid.layers.fc(input=x,
+                                      size=1,
+                                      num_flatten_dims=num_flatten_dims)
+
+            place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
+            ) else fluid.CUDAPlace(0)
+            exe = fluid.Executor(place=place)
+            exe.run(startup_program)
+            out = exe.run(main_program, feed={"x": input}, fetch_list=[out])
+            return out
+
+        res_1 = run_program(-1)
+        res_2 = run_program(2)
+        self.assertTrue(np.array_equal(res_1, res_2))


 class TestFCOpError(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py
index 06f8da84a28d2..47671ab3a85e8 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net
 from fake_reader import fake_imdb_reader
 from parallel_executor_test_base import TestParallelExecutorBase
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py
index 62eef67a5695f..921dbdbc6d4e1 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py
@@ -19,8 +19,6 @@

 class TestFuseBatchNormActPass(unittest.TestCase):
     def build_program(self, main_program, startup_program, use_cuda, seed=1):
-        main_program.random_seed = seed
-        startup_program.random_seed = seed
         with fluid.program_guard(main_program, startup_program):
             x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32')
             y = fluid.layers.data(name="y", shape=[1], dtype='int64')
@@ -59,6 +57,8 @@ def build_program(self, main_program, startup_program, use_cuda, seed=1):
         return x, y, loss

     def check(self, place, use_cuda):
+        paddle.manual_seed(1)
+        paddle.framework.random._manual_program_seed(1)
         main_program = fluid.Program()
         startup_program = fluid.Program()
         x, y, loss = self.build_program(main_program, startup_program,
                                         use_cuda)
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py
index b47bcd2a032a3..a22daeedd09e9 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net
 from fake_reader import fake_imdb_reader
 from parallel_executor_test_base import TestParallelExecutorBase
diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
index 6b08c4250f61c..9ab8440407390 100644
--- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py
@@ -16,7 +16,7 @@

 import unittest
 import numpy as np
-
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
@@ -37,6 +37,7 @@ def setUp(self):
             "seed": 10,
             "use_mkldnn": self.use_mkldnn
         }
+        paddle.manual_seed(10)

         self.outputs = {'Out': np.zeros((123, 92), dtype='float32')}
diff --git a/python/paddle/fluid/tests/unittests/test_generator.py b/python/paddle/fluid/tests/unittests/test_generator.py
index 6cc43d3d54982..8b1f420358d31 100644
--- a/python/paddle/fluid/tests/unittests/test_generator.py
+++ b/python/paddle/fluid/tests/unittests/test_generator.py
@@ -16,6 +16,7 @@
 from __future__ import print_function
 import os
 import unittest
+import paddle
 import paddle.fluid.generator as generator
 import time  # temp for debug
@@ -34,10 +35,11 @@ def test_basic_generator(self):
         st = gen.get_state()
         gen.set_state(st)
         gen.random()
-        gen.set_cpu_engine(gen.get_cpu_engine())

     def test_basic_generator_error(self):
-        self.assertRaises(ValueError, generator.Generator, device="CUDA")
+        if paddle.fluid.core.is_compiled_with_cuda():
+            self.assertRaises(
+                ValueError, generator.Generator, place=paddle.CUDAPlace(0))


 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py
index 4f0beb8c0dcd5..7c1ff41f7e767 100644
--- a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py
+++ b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py
@@ -35,10 +35,10 @@ def random_reader():

 def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
+    paddle.manual_seed(1)
+    paddle.framework.random._manual_program_seed(1)
     startup_prog = fluid.Program()
     main_prog = fluid.Program()
-    startup_prog.random_seed = 1
-    main_prog.random_seed = 1

     with fluid.unique_name.guard():
         with fluid.program_guard(main_prog, startup_prog):
diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py
index 5777bb3c6f5e3..5c9867e681524 100644
--- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py
+++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py
@@ -16,6 +16,7 @@

 import unittest
 import numpy as np
+import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
@@ -266,8 +267,8 @@ def hs_net_conf(self, is_sparse):
     def training_test(self, is_sparse):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
+            paddle.manual_seed(1)
             start_up = fluid.default_startup_program()
-            start_up.random_seed = 1  # Fix random seed
             x = np.arange(6).reshape(6)
             path_table = np.array([(1, 2, -1), (1, 2, -1)]).astype('int64')
             path_code = np.array([(1, 0, -1), (0, 0, -1)]).astype('int64')
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py
index a4f3858d6fb24..fdf7adbfb45f0 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py
@@ -121,6 +121,7 @@ def test_minimize(self):

         def run_simple_conv(inp_np, use_scaler=True):
             paddle.manual_seed(10)
+            paddle.framework.random._manual_program_seed(10)
             with fluid.dygraph.guard():
                 model = SimpleConv(
                     num_channels=3,
@@ -204,6 +205,7 @@ def train_resnet(self, enable_amp=True):

         with fluid.dygraph.guard():
             paddle.manual_seed(seed)
+            paddle.framework.random._manual_program_seed(seed)
             resnet = ResNet(use_cudnn=True)

             optimizer = optimizer_setting(
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py
index af71d9d27b9a3..cc6c2f97a9334 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py
@@ -206,11 +206,10 @@ def test_deefcf(self):
         else:
             (users_np, items_np, labels_np, num_users, num_items,
              matrix) = get_data()
-
+        paddle.manual_seed(seed)
+        paddle.framework.random._manual_program_seed(seed)
         startup = fluid.Program()
-        startup.random_seed = seed
         main = fluid.Program()
-        main.random_seed = seed

         scope = fluid.core.Scope()
         with new_program_scope(main=main, startup=startup, scope=scope):
@@ -244,8 +243,8 @@ def test_deefcf(self):
             sys.stderr.write('static loss %s\n' % static_loss)

         with fluid.dygraph.guard():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
+            paddle.manual_seed(seed)
+            paddle.framework.random._manual_program_seed(seed)

             deepcf = DeepCF(num_users, num_items, matrix)
             adam = fluid.optimizer.AdamOptimizer(
@@ -269,8 +268,8 @@ def test_deefcf(self):
                 sys.stderr.write('dynamic loss: %s %s\n' %
                                  (slice, dy_loss))

         with fluid.dygraph.guard():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
+            paddle.manual_seed(seed)
+            paddle.framework.random._manual_program_seed(seed)

             deepcf2 = DeepCF(num_users, num_items, matrix)
             adam2 = fluid.optimizer.AdamOptimizer(
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
index 227cd5d4acb29..720c9f95c251e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py
@@ -312,6 +312,7 @@ def model_f(input):

         with fluid.dygraph.guard():
             paddle.manual_seed(123)
+            paddle.framework.random._manual_program_seed(123)
             a = fluid.dygraph.to_variable(value)
             a.stop_gradient = False

@@ -328,6 +329,7 @@ def model_f(input):

         with fluid.dygraph.guard():
             paddle.manual_seed(123)
+            paddle.framework.random._manual_program_seed(123)
             a = fluid.dygraph.to_variable(value)
             a.stop_gradient = False
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
index 80bdf2ea8a898..b752b439f0fa9 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
@@ -56,13 +56,11 @@ def forward(self, inputs):
 class TestDygraphGAN(unittest.TestCase):
     def test_gan_float32(self):
         seed = 90
-
+        paddle.manual_seed(1)
+        paddle.framework.random._manual_program_seed(1)
         startup = fluid.Program()
-        startup.random_seed = seed
         discriminate_p = fluid.Program()
         generate_p = fluid.Program()
-        discriminate_p.random_seed = seed
-        generate_p.random_seed = seed

         scope = fluid.core.Scope()
         with new_program_scope(
@@ -133,8 +131,8 @@ def test_gan_float32(self):

         dy_params = dict()
         with fluid.dygraph.guard():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
+            paddle.manual_seed(1)
+            paddle.framework.random._manual_program_seed(1)

             discriminator = Discriminator()
             generator = Generator()
@@ -177,10 +175,9 @@ def test_gan_float32(self):

         dy_params2 = dict()
         with fluid.dygraph.guard():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
             fluid.set_flags({'FLAGS_sort_sum_gradient': True})
-
+            paddle.manual_seed(1)
+            paddle.framework.random._manual_program_seed(1)
             discriminator2 = Discriminator()
             generator2 = Generator()
             sgd2 = SGDOptimizer(
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
index 01f3c02774698..4db6f2d0da1d5 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
@@ -61,12 +61,10 @@ def forward(self, x, adj):

 class TestDygraphGNN(unittest.TestCase):
     def test_gnn_float32(self):
-        seed = 90
-
+        paddle.manual_seed(90)
+        paddle.framework.random._manual_program_seed(90)
         startup = fluid.Program()
-        startup.random_seed = seed
         main = fluid.Program()
-        main.random_seed = seed

         scope = fluid.core.Scope()
         with new_program_scope(main=main, startup=startup, scope=scope):
@@ -114,8 +112,8 @@ def test_gnn_float32(self):
             scope.find_var(model.gc.weight.name).get_tensor())

         with fluid.dygraph.guard():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
+            paddle.manual_seed(90)
+            paddle.framework.random._manual_program_seed(90)
features = np.ones([1, 100, 50], dtype=np.float32) # Use selected rows when it's supported. @@ -140,8 +138,8 @@ def test_gnn_float32(self): model_gc_weight_value = model.gc.weight.numpy() with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(90) + paddle.framework.random._manual_program_seed(90) features2 = np.ones([1, 100, 50], dtype=np.float32) # Use selected rows when it's supported. diff --git a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py index 6349d71760934..f0fea2d7eb75c 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -94,8 +95,8 @@ def simple_net_float32(self, is_sparse, dtype): for is_sort_sum_gradient in [True, False]: with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, @@ -139,8 +140,8 @@ def simple_net_float32(self, is_sparse, dtype): dy_loss_value = dy_loss.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index 499a4311f6e17..5400b785d2929 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -16,6 +16,7 @@ import unittest import numpy as np import six +import paddle import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, BatchNorm, Embedding, GRUUnit @@ -401,9 +402,9 @@ def test_while_op(self): dtype='int64').reshape([1, Config.max_length]))) with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) ocr_attention = OCRAttention() if Config.learning_rate_decay == "piecewise_decay": @@ -453,8 +454,8 @@ def test_while_op(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) ocr_attention = OCRAttention() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index a7783afc5cff3..7876675bcc6a1 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -74,8 +74,8 @@ def _check_exception(self, exception_message, place=None): with fluid.dygraph.guard(place): try: - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( parameter_list=mlp.parameters()) @@ -91,8 +91,8 @@ def _check_mlp(self, place=None): ) else fluid.CUDAPlace(0) with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( @@ -132,8 +132,8 @@ def _check_mlp(self, place=None): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) if place == None: place = fluid.CPUPlace() if not core.is_compiled_with_cuda( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py index 9f75c92b185ed..ca8fb4c220f5e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py @@ -74,8 +74,8 @@ def _check_exception(self, exception_message, place=None): with fluid.dygraph.guard(place): try: - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( parameter_list=mlp.parameters()) @@ -91,8 +91,8 @@ def _check_mlp(self, place=None): ) else fluid.CUDAPlace(0) with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) mlp = MLP() optimizer = self.get_optimizer_dygraph( @@ -132,8 +132,8 @@ def _check_mlp(self, place=None): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) if place == None: place = fluid.CPUPlace() if not core.is_compiled_with_cuda( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index bd629f5f4a69a..fa23ff8e7c29f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -225,8 +226,8 @@ def ptb_rnn_cpu_float32(self, is_sparse): traced_layer = None with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -293,8 +294,8 @@ def 
ptb_rnn_cpu_float32(self, is_sparse): dy_last_hidden_value = last_hidden.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py index 526c1706e2d08..0487f8dd9a640 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -43,9 +44,10 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) + # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -93,8 +95,9 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): dy_last_hidden_value = last_hidden.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) + ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py index 735ec4d3f1ea8..0076c61e58407 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py @@ -64,8 +64,8 @@ def test_mnist_float32(self): mask = np.array(mask_list).astype("float32") with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) policy = Policy(input_size=4) @@ -105,8 +105,8 @@ def test_mnist_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 815437072fde2..e8a2298c17d00 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -251,8 +251,8 @@ def test_resnet_float32(self): traced_layer = None with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) resnet = ResNet() optimizer = optimizer_setting( @@ -334,8 +334,8 @@ def 
test_resnet_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index d26d6f25aa8ff..13b12da3318ca 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -77,9 +77,10 @@ def test_resnet_sort_gradient_float32(self): batch_size = train_parameters["batch_size"] batch_num = 10 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) + resnet = ResNet() optimizer = optimizer_setting( train_parameters, parameter_list=resnet.parameters()) @@ -136,8 +137,8 @@ def test_resnet_sort_gradient_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py index eb9dc926c8207..e20f5d6a72278 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py @@ -219,8 +219,8 @@ def setUp(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -305,8 +305,8 @@ def testLoadAndSetVarBase(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -414,8 +414,8 @@ def testSetVariable(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -521,8 +521,8 @@ def testSetNumpy(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -634,8 +634,6 @@ def testSetVariableBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - 
fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -713,8 +711,8 @@ def testLoadAndSetVarBaseBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -804,9 +802,10 @@ def testSetNumpyBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to + ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py index 4ab35a21aff43..5e3e8f224e01f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py @@ -219,8 +219,8 @@ def setUp(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -305,8 +305,8 @@ def testLoadAndSetVarBase(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -414,8 +414,8 @@ def testSetVariable(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -521,8 +521,8 @@ def testSetNumpy(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -634,8 +634,8 @@ def testSetVariableBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -713,8 +713,8 @@ def testLoadAndSetVarBaseBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, @@ -804,8 +804,8 @@ def testSetNumpyBeforeTrain(self): batch_num = 200 with fluid.dygraph.guard(): - 
fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index 283addaf6283a..a04e1e4e5aafe 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -308,8 +308,8 @@ def test_se_resnext_float32(self): batch_num = 1 epoch_num = 1 with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) se_resnext = SeResNeXt() optimizer = optimizer_setting( @@ -367,8 +367,8 @@ def test_se_resnext_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py index 3765cb784d652..794f59e48507e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.dygraph.nn import Embedding @@ -101,8 +102,8 @@ def simple_net_float(self, is_sparse, dtype): for is_sort_sum_gradient in [True, False]: traced_layer = None with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, @@ -145,8 +146,8 @@ def simple_net_float(self, is_sparse, dtype): dy_loss_value = dy_loss.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) simple_net = SimpleNet( hidden_size=hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py index d603a7d6ca0de..e94157fa047ee 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py @@ -468,8 +468,8 @@ def build_optimizer(layer, cfg, loss=None): class DyGraphTrainModel(object): def __init__(self, cfg): - fluid.default_startup_program().random_seed = cfg.seed - fluid.default_main_program().random_seed = cfg.seed + paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) self.generator = Generator(cfg) self.discriminator = Discriminator(cfg) @@ -529,12 +529,12 @@ def create_data_layer(): shape=[None, 
cfg.c_dim], dtype='float32', name='label_trg') return image_real, label_org, label_trg + paddle.manual_seed(cfg.seed) + paddle.framework.random._manual_program_seed(cfg.seed) self.gen_program = fluid.Program() gen_startup_program = fluid.Program() with fluid.program_guard(self.gen_program, gen_startup_program): - self.gen_program.random_seed = cfg.seed - gen_startup_program.random_seed = cfg.seed with fluid.unique_name.guard(): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) @@ -546,8 +546,6 @@ def create_data_layer(): self.dis_program = fluid.Program() dis_startup_program = fluid.Program() with fluid.program_guard(self.dis_program, dis_startup_program): - self.dis_program.random_seed = cfg.seed - dis_startup_program.random_seed = cfg.seed with fluid.unique_name.guard(): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py index c59ce44ec96a8..9f58ef881e4e4 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid from paddle.fluid import Embedding, LayerNorm, Linear, Layer from paddle.fluid.dygraph import to_variable, guard @@ -949,9 +950,9 @@ def transformer_sort_gradient_float32(self, is_sparse): seed = 90 with guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) transformer = TransFormer( ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size, @@ -1034,8 +1035,8 @@ def transformer_sort_gradient_float32(self, is_sparse): dy_token_num_value = dy_token_num.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + paddle.manual_seed(seed) + paddle.framework.random._manual_program_seed(seed) transformer = TransFormer( ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size, diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py index c5228fcf12274..eaa7e711a29c7 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py @@ -37,10 +37,10 @@ def check_network_convergence(self, use_cuda=True, use_mem_opt=False, iter_num=5): + paddle.manual_seed(100) + paddle.framework.random._manual_program_seed(100) prog = Program() startup_prog = Program() - prog.random_seed = 100 - startup_prog.random_seed = 100 with program_guard(prog, startup_prog): image = layers.data(name='x', shape=[784], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py index 2b79659b9c695..87b6e76a6d0ab 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py @@ -18,7 +18,7 @@ import pickle import unittest import numpy as np - +import paddle from paddle.static import InputSpec import 
paddle.fluid as fluid from paddle.fluid.dygraph import Linear @@ -80,7 +80,7 @@ def forward(self, x): def train(layer, input_size=784, label_size=1): # create optimizer - adam = fluid.optimizer.SGDOptimizer( + sgd = fluid.optimizer.SGDOptimizer( learning_rate=0.01, parameter_list=layer.parameters()) # create data loader train_loader = fluid.io.DataLoader.from_generator(capacity=5) @@ -97,7 +97,7 @@ def train(layer, input_size=784, label_size=1): avg_loss = fluid.layers.mean(loss) avg_loss.backward() - adam.minimize(avg_loss) + sgd.minimize(avg_loss) layer.clear_gradients() return [img], layer, avg_loss @@ -108,7 +108,8 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) def train_and_save_model(self, model_path=None, configs=None): layer = LinearNet(784, 1) @@ -149,8 +150,8 @@ def load_and_finetune(self, train_layer, load_train_layer): train_layer.train() load_train_layer.train() # train & compare - _, _, train_loss = train(train_layer) - _, _, load_train_loss = train(load_train_layer) + img0, _, train_loss = train(train_layer) + img1, _, load_train_loss = train(load_train_layer) self.assertTrue( np.array_equal(train_loss.numpy(), load_train_loss.numpy())) @@ -293,7 +294,8 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) def basic_save_load(self, layer, model_path, configs): # 1. train & save @@ -385,7 +387,8 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) # train and save base model self.train_and_save_orig_model() @@ -426,7 +429,8 @@ def setUp(self): # enable dygraph mode fluid.enable_dygraph() # config seed - fluid.default_main_program().random_seed = SEED + paddle.manual_seed(SEED) + paddle.framework.random._manual_program_seed(SEED) def train_and_save(self): train_layer = LinearNetReturnHidden(8, 8) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 1992a3bb39807..eb79a80da99fa 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -57,8 +57,8 @@ def _get_place(self, force_to_use_cpu=False): @contextlib.contextmanager def static_graph(self): with new_program_scope(): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + paddle.manual_seed(self.seed) + paddle.framework.random._manual_program_seed(self.seed) yield def get_static_graph_result(self, @@ -77,8 +77,8 @@ def get_static_graph_result(self, def dynamic_graph(self, force_to_use_cpu=False): with fluid.dygraph.guard( self._get_place(force_to_use_cpu=force_to_use_cpu)): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + paddle.manual_seed(self.seed) + paddle.framework.random._manual_program_seed(self.seed) yield @@ -1034,7 +1034,7 @@ def test_nce(self): static_rlt2 = self.get_static_graph_result( feed=feed_dict, fetch_list=[nce_loss2])[0] - with self.dynamic_graph(force_to_use_cpu=True): + with self.dynamic_graph(): words = [] for i in range(window_size): words.append(base.to_variable(inp_word[i])) @@ 
-1070,7 +1070,7 @@ def test_nce(self): self.assertTrue(np.allclose(static_rlt2, static_rlt)) self.assertTrue(np.allclose(dy_rlt_value, static_rlt)) - with self.dynamic_graph(force_to_use_cpu=True): + with self.dynamic_graph(): custom_weight = np.random.randn(dict_size, 128).astype("float32") weight_attr = fluid.ParamAttr( initializer=fluid.initializer.NumpyArrayInitializer( @@ -1996,13 +1996,13 @@ def test_accuracy(self): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) x = np.random.rand(3, 32, 32).astype("float32") y = np.array([[1], [0], [1]]) static_out = exe.run(feed={"input": x, "label": y}, fetch_list=result[0]) - with self.dynamic_graph(): + with self.dynamic_graph(force_to_use_cpu=True): data = base.to_variable(x) label = base.to_variable(y) fc_out = fluid.layers.fc(data, size=10) diff --git a/python/paddle/fluid/tests/unittests/test_manual_seed.py b/python/paddle/fluid/tests/unittests/test_manual_seed.py index 747026622e465..a1d6eb915ce78 100644 --- a/python/paddle/fluid/tests/unittests/test_manual_seed.py +++ b/python/paddle/fluid/tests/unittests/test_manual_seed.py @@ -15,30 +15,33 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid as fluid from paddle.framework import manual_seed from paddle.fluid.framework import Program, default_main_program, default_startup_program +import numpy as np class TestManualSeed(unittest.TestCase): def test_manual_seed(self): - local_program = Program() - local_main_prog = default_main_program() - local_start_prog = default_startup_program() - - self.assertEqual(0, local_program.random_seed) - self.assertEqual(0, local_main_prog.random_seed) - self.assertEqual(0, local_start_prog.random_seed) - - manual_seed(102) - global_program1 = Program() - global_program2 = Program() - global_main_prog = default_main_program() - global_start_prog = default_startup_program() - self.assertEqual(102, global_program1.random_seed) - self.assertEqual(102, global_program2.random_seed) - self.assertEqual(102, global_main_prog.random_seed) - self.assertEqual(102, global_start_prog.random_seed) + fluid.enable_dygraph() + + gen = paddle.manual_seed(12312321111) + x = fluid.layers.gaussian_random([10], dtype="float32") + st1 = gen.get_state() + x1 = fluid.layers.gaussian_random([10], dtype="float32") + gen.set_state(st1) + x2 = fluid.layers.gaussian_random([10], dtype="float32") + gen.manual_seed(12312321111) + x3 = fluid.layers.gaussian_random([10], dtype="float32") + x_np = x.numpy() + x1_np = x1.numpy() + x2_np = x2.numpy() + x3_np = x3.numpy() + + if not fluid.core.is_compiled_with_cuda(): + self.assertTrue(np.allclose(x1_np, x2_np)) + self.assertTrue(np.allclose(x_np, x3_np)) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_py_func_op.py b/python/paddle/fluid/tests/unittests/test_py_func_op.py index 6045f2d713627..32d8f73552f71 100644 --- a/python/paddle/fluid/tests/unittests/test_py_func_op.py +++ b/python/paddle/fluid/tests/unittests/test_py_func_op.py @@ -147,10 +147,8 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.core.Scope()): - fluid.default_main_program().random_seed = 1 - fluid.default_startup_program().random_seed = 1 + gen = paddle.manual_seed(1) np.random.seed(1) - img = fluid.layers.data(name='image', shape=[784], dtype='float32') label =
fluid.layers.data(name='label', shape=[1], dtype='int64') loss = simple_fc_net(img, label, use_py_func_op) @@ -189,17 +187,17 @@ def setUp(self): self.use_parallel_executor = False def test_loss_diff(self): - losses = [] for use_cuda in [True, False]: + losses = [] for use_py_func_op in [True, False]: L = test_main(use_cuda, use_py_func_op, self.use_parallel_executor) if L is not None: losses.append(L) - for idx in six.moves.range(len(losses) - 1): - max_diff = np.max(np.abs(losses[idx] - losses[0])) - self.assertAlmostEqual(max_diff, 0, delta=1e-3) + for idx in six.moves.range(len(losses) - 1): + max_diff = np.max(np.abs(losses[idx] - losses[0])) + self.assertAlmostEqual(max_diff, 0, delta=1e-3) class TestPyFuncOpUseParallelExecutor(TestPyFuncOpUseExecutor): diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index 2933abe46c1b8..343508bf619b6 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -26,27 +26,30 @@ class TestGeneratorSeed(unittest.TestCase): """ Test cases for cpu generator seed. """ def test_generator_uniform_random_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0) + st1 = gen.get_state() x1 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) + gen.set_state(st1) x2 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) - gen.manual_seed(12312321111) + + paddle.manual_seed(12312321111) x3 = fluid.layers.uniform_random( [10], dtype="float32", min=0.0, max=1.0) + x_np = x.numpy() x1_np = x1.numpy() x2_np = x2.numpy() @@ -57,11 +61,9 @@ def test_generator_uniform_random_dygraph(self): self.assertTrue(np.allclose(x_np, x3_np)) def test_generator_uniform_random_static(self): - fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -93,11 +95,9 @@ def test_generator_uniform_random_static(self): self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_gen_dropout_dygraph(self): - gen = generator.Generator() - fluid.enable_dygraph() - gen.manual_seed(111111111) + gen = paddle.manual_seed(111111111) st = gen.get_state() # x = np.arange(1,101).reshape(2,50).astype("float32") x = fluid.layers.uniform_random( @@ -110,8 +110,7 @@ def test_gen_dropout_dygraph(self): y1 = fluid.layers.dropout(x1, 0.5) y_np = y.numpy() y1_np = y1.numpy() - #print(y_np) - #print(y1_np) + if not core.is_compiled_with_cuda(): print(">>>>>>> dropout dygraph >>>>>>>") self.assertTrue(np.allclose(y_np, y1_np)) @@ -119,8 +118,7 @@ def test_gen_dropout_static(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -137,19 +135,16 @@ out2 = exe.run(train_program, feed={}, fetch_list=[y_1]) out1_np = np.array(out1[0]) out2_np = np.array(out2[0]) - # print(out1_np) - # print(out2_np) + if not core.is_compiled_with_cuda(): print(">>>>>>> dropout static >>>>>>>") self.assertTrue(np.allclose(out1_np, out2_np)) def
test_generator_gaussian_random_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() - fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = fluid.layers.gaussian_random([10], dtype="float32") st1 = gen.get_state() x1 = fluid.layers.gaussian_random([10], dtype="float32") @@ -168,11 +163,9 @@ def test_generator_gaussian_random_dygraph(self): self.assertTrue(np.allclose(x_np, x3_np)) def test_generator_gaussian_random_static(self): - fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -210,7 +203,7 @@ def test_generator_randint_dygraph(self): fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = paddle.randint(low=10, shape=[10], dtype="int32") st1 = gen.get_state() x1 = paddle.randint(low=10, shape=[10], dtype="int32") @@ -228,12 +221,64 @@ def test_generator_randint_dygraph(self): self.assertTrue(np.allclose(x1_np, x2_np)) self.assertTrue(np.allclose(x_np, x3_np)) - def test_generator_ranint_static(self): + def test_generator_uniform_random_static(self): + fluid.disable_dygraph() + + gen = paddle.manual_seed(123123143) + + startup_program = fluid.Program() + train_program = fluid.Program() + with fluid.program_guard(train_program, startup_program): + # example 1: + # attr shape is a list which doesn't contain tensor Variable. + result_1 = fluid.layers.uniform_random(shape=[3, 4]) + result_2 = fluid.layers.uniform_random(shape=[3, 4]) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(startup_program) + out1 = exe.run(train_program, + feed={}, + fetch_list=[result_1, result_2]) + #gen.set_state(cur_state) + gen.manual_seed(123123143) + out2 = exe.run(train_program, + feed={}, + fetch_list=[result_1, result_2]) + out1_res1 = np.array(out1[0]) + out1_res2 = np.array(out1[1]) + out2_res1 = np.array(out2[0]) + out2_res2 = np.array(out2[1]) + + if not core.is_compiled_with_cuda(): + self.assertTrue(np.allclose(out1_res1, out2_res1)) + self.assertTrue(np.allclose(out1_res2, out2_res2)) + self.assertTrue(not np.allclose(out1_res2, out1_res1)) + + def test_generator_randint_dygraph(self): + """Test Generator seed.""" + fluid.enable_dygraph() + + gen = paddle.manual_seed(12312321111) + x = paddle.randint(low=1) + st1 = gen.get_state() + x1 = paddle.randint(low=1) + gen.set_state(st1) + x2 = paddle.randint(low=1) + gen.manual_seed(12312321111) + x3 = paddle.randint(low=1) + x_np = x.numpy() + x1_np = x1.numpy() + x2_np = x2.numpy() + x3_np = x3.numpy() + if not core.is_compiled_with_cuda(): + self.assertTrue(np.allclose(x1_np, x2_np)) + self.assertTrue(np.allclose(x_np, x3_np)) + + def test_generator_ranint_static(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -267,11 +312,10 @@ def test_generator_ranint_static(self): def test_generator_randperm_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() fluid.enable_dygraph() - gen.manual_seed(12312321111) + gen = paddle.manual_seed(12312321111) x = paddle.randperm(10) st1 = gen.get_state() x1 = paddle.randperm(10) @@ -284,9 +328,6 @@ def test_generator_randperm_dygraph(self): x2_np = x2.numpy() x3_np = x3.numpy() - # print("## {}".format(x1_np)) - # print("## {}".format(x2_np)) - if not core.is_compiled_with_cuda(): print(">>>>>>> randperm dygraph 
>>>>>>>") self.assertTrue(np.allclose(x1_np, x2_np)) @@ -296,8 +337,7 @@ def test_generator_randperm_static(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -312,8 +352,8 @@ def test_generator_randperm_static(self): out1 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) - gen.manual_seed(123123143) + + paddle.manual_seed(123123143) out2 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) @@ -331,7 +371,7 @@ def test_generator_randperm_static(self): def test_generator_sampling_id_dygraph(self): """Test Generator seed.""" - gen = generator.Generator() + gen = paddle.manual_seed(12312321111) fluid.enable_dygraph() @@ -339,14 +379,17 @@ def test_generator_sampling_id_dygraph(self): x = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) y = fluid.layers.sampling_id(x) + st1 = gen.get_state() x1 = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) y1 = fluid.layers.sampling_id(x) + gen.set_state(st1) x2 = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) y2 = fluid.layers.sampling_id(x) + gen.manual_seed(12312321111) x3 = fluid.layers.uniform_random( [10, 10], dtype="float32", min=0.0, max=1.0) @@ -357,9 +400,6 @@ def test_generator_sampling_id_dygraph(self): x2_np = y2.numpy() x3_np = y3.numpy() - print("## {}".format(x1_np)) - print("## {}".format(x2_np)) - if not core.is_compiled_with_cuda(): print(">>>>>>> sampling id dygraph >>>>>>>") self.assertTrue(np.allclose(x1_np, x2_np)) @@ -369,8 +409,7 @@ def test_generator_randperm_static(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + paddle.manual_seed(123123143) startup_program = fluid.Program() train_program = fluid.Program() @@ -386,8 +425,8 @@ def test_generator_randperm_static(self): out1 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) - gen.manual_seed(123123143) + + paddle.manual_seed(123123143) out2 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) @@ -406,8 +445,7 @@ def test_generator_randperm_static(self): def test_gen_TruncatedNormal_initializer(self): fluid.disable_dygraph() - gen = generator.Generator() - gen.manual_seed(123123143) + gen = paddle.manual_seed(123123143) cur_state = gen.get_state() startup_program = fluid.Program() @@ -432,9 +470,7 @@ def test_gen_TruncatedNormal_initializer(self): out1 = exe.run(train_program, feed={}, fetch_list=[result_1, result_2]) - #gen.set_state(cur_state) - #gen.set_state(cur_state) gen.manual_seed(123123143) with fluid.program_guard(train_program, startup_program): exe.run(startup_program) @@ -447,11 +483,6 @@ def test_gen_TruncatedNormal_initializer(self): out2_res1 = np.array(out2[0]) out2_res2 = np.array(out2[1]) - print(out1_res1) - print(out1_res2) - print(out2_res1) - print(out2_res2) - if not core.is_compiled_with_cuda(): print(">>>>>>> sampling id static >>>>>>>") self.assertTrue(np.allclose(out1_res1, out2_res1)) diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 58b407f8bc1f4..44087c5421a5e 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -169,9 +169,10 @@ def run_program(self, place, feed_list): return param_sum def 
check_l2decay_regularizer(self, place, model): + paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - startup_prog.random_seed = 1 with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog): data = fluid.layers.data( @@ -188,9 +189,11 @@ def check_l2decay_regularizer(self, place, model): return param_sum def check_l2decay(self, place, model): + paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) main_prog = fluid.framework.Program() startup_prog = fluid.framework.Program() - startup_prog.random_seed = 1 + with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog): data = fluid.layers.data( @@ -243,7 +246,8 @@ def test_repeated_regularization(self): with fluid.dygraph.guard(): input = fluid.dygraph.to_variable( np.random.randn(3, 5).astype('float32')) - fluid.default_main_program().random_seed = 1 + paddle.manual_seed(1) + paddle.framework.random._manual_program_seed(1) linear1 = fluid.dygraph.Linear( 5, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr) diff --git a/python/paddle/fluid/tests/unittests/test_transformer_api.py b/python/paddle/fluid/tests/unittests/test_transformer_api.py index c8d1e77134036..5fea9f69a18c8 100644 --- a/python/paddle/fluid/tests/unittests/test_transformer_api.py +++ b/python/paddle/fluid/tests/unittests/test_transformer_api.py @@ -211,7 +211,8 @@ def ffn(src, encoder_layer, ffn_fc1_act="relu"): class TestTransformer(unittest.TestCase): def test_multi_head_attention(self): def multihead_attention_test_helper(self_attention, cache): - paddle.framework.manual_seed(2020) + paddle.manual_seed(2020) + paddle.framework.random._manual_program_seed(2020) # self_attention|cross_attention, cache|No cache with fluid.dygraph.guard(fluid.CPUPlace()): @@ -275,6 +276,7 @@ def test_transformer_encoder_layer(self): with fluid.dygraph.guard(fluid.CPUPlace()): paddle.framework.manual_seed(2020) + paddle.framework.random._manual_program_seed(2020) ffn_fc1_act = "relu" # 1.generate basic params diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 158462a1e6e10..a04aaaef0d41b 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -21,6 +21,6 @@ from op_test import OpTest import paddle import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.fluid as fluid from paddle.fluid import Program, program_guard @@ -234,16 +235,16 @@ def test_check_output(self): def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() - + paddle.manual_seed(10) op = Operator( "uniform_random", Out="X", - shape=[4, 784], + shape=[100, 784], min=-5.0, max=10.0, seed=10) op.run(scope, place) - self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( @@ -255,19 +256,19 @@ class TestUniformRandomOpSelectedRowsWithDiagInit( def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() - + paddle.manual_seed(10) op = Operator( "uniform_random", Out="X", - shape=[4, 784], + shape=[100, 784], min=-5.0, max=10.0, seed=10, - diag_num=4, + diag_num=100, diag_step=784, diag_val=1.0) op.run(scope, place) -
self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist_diag(np.array(out.get_tensor())) self.assertTrue( np.allclose( @@ -276,6 +277,7 @@ def check_with_place(self, place): class TestUniformRandomOpApi(unittest.TestCase): def test_api(self): + paddle.manual_seed(10) x = fluid.layers.data('x', shape=[16], dtype='float32', lod_level=1) y = fluid.layers.fc(x, size=16, @@ -347,12 +349,15 @@ def test_attr_tensor_int32_API(self): class TestUniformRandomOp_API_seed(unittest.TestCase): def test_attr_tensor_API(self): + _seed = 10 + gen = paddle.manual_seed(_seed) + gen._is_init_py = False startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): _min = 5 _max = 10 - _seed = 10 + ret = fluid.layers.nn.uniform_random( [2, 3, 2], min=_min, max=_max, seed=_seed) ret_2 = fluid.layers.nn.uniform_random( @@ -386,8 +391,8 @@ def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() shape_tensor = scope.var("Shape").get_tensor() - shape_tensor.set(np.array([4, 784]).astype("int64"), place) - + shape_tensor.set(np.array([100, 784]).astype("int64"), place) + paddle.manual_seed(10) op = Operator( "uniform_random", ShapeTensor="Shape", @@ -396,7 +401,7 @@ def check_with_place(self, place): max=10.0, seed=10) op.run(scope, place) - self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( @@ -418,10 +423,10 @@ def check_with_place(self, place): scope = core.Scope() out = scope.var("X").get_selected_rows() shape_1 = scope.var("shape1").get_tensor() - shape_1.set(np.array([4]).astype("int64"), place) + shape_1.set(np.array([100]).astype("int64"), place) shape_2 = scope.var("shape2").get_tensor() shape_2.set(np.array([784]).astype("int64"), place) - + paddle.manual_seed(10) op = Operator( "uniform_random", ShapeTensorList=["shape1", "shape2"], @@ -430,7 +435,7 @@ def check_with_place(self, place): max=10.0, seed=10) op.run(scope, place) - self.assertEqual(out.get_tensor().shape(), [4, 784]) + self.assertEqual(out.get_tensor().shape(), [100, 784]) hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( @@ -455,21 +460,21 @@ def test_errors(self): def test_Variable(): x1 = fluid.create_lod_tensor( - np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace()) + np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()) fluid.layers.uniform_random_batch_size_like(x1) self.assertRaises(TypeError, test_Variable) def test_shape(): x1 = fluid.layers.data( - name='x2', shape=[4, 784], dtype='float32') + name='x2', shape=[100, 784], dtype='float32') fluid.layers.uniform_random_batch_size_like(x1, shape="shape") self.assertRaises(TypeError, test_shape) def test_dtype(): x2 = fluid.layers.data( - name='x2', shape=[4, 784], dtype='float32') + name='x2', shape=[100, 784], dtype='float32') fluid.layers.uniform_random_batch_size_like(x2, 'int32') self.assertRaises(TypeError, test_dtype) @@ -495,20 +500,20 @@ def test_errors(self): def test_Variable(): x1 = fluid.create_lod_tensor( - np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace()) + np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace()) paddle.tensor.random.uniform(x1) self.assertRaises(TypeError, test_Variable) def test_Variable2(): - x1 = np.zeros((4, 784)) + x1 = np.zeros((100, 784)) paddle.tensor.random.uniform(x1) 
self.assertRaises(TypeError, test_Variable2) def test_dtype(): x2 = fluid.layers.data( - name='x2', shape=[4, 784], dtype='float32') + name='x2', shape=[100, 784], dtype='float32') paddle.tensor.random.uniform(x2, 'int32') self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index 207ff66a0f877..ee01bfb21f820 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -15,6 +15,7 @@ from __future__ import print_function import unittest +import paddle import paddle.fluid.layers as layers from paddle.fluid.executor import Executor import paddle.fluid.core as core diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 1bb13294805ef..2555d24464112 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -14,28 +14,50 @@ # TODO: define random api import paddle.fluid as fluid +from paddle.fluid import core __all__ = ['manual_seed'] def manual_seed(seed): """ - :alias_main: paddle.manual_seed - :alias: paddle.manual_seed,paddle.framework.random.manual_seed - Set global manual seed for program + Sets the seed for the global default generator, which manages the random number generation. Args: - manual_seed(int): random seed for program + seed(int): The random seed to set. It is recommended to set a large int number. Returns: - None. + Generator: The global default generator object. Examples: .. code-block:: python - from paddle.framework import manual_seed - manual_seed(102) + import paddle + gen = paddle.manual_seed(102) + + """ + # TODO(zhiqiu): 1. remove program.random_seed when all random-related ops are upgraded + # 2. support gpu generator by global device + + seed = int(seed) + + core.default_cpu_generator()._is_init_py = True + return core.default_cpu_generator().manual_seed(seed) + + +def _manual_program_seed(seed): + """ + Sets the global seed for generating random numbers. + + NOTE(zhiqiu): This is the original implementation of manual_seed. Keep it temporarily, + since the CUDA generator has not been developed yet and the unittests still need it. + + Args: + seed(int): The random seed to set. It is recommended to set a large int number.
diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py
index 207ff66a0f877..ee01bfb21f820 100644
--- a/python/paddle/fluid/tests/unittests/test_while_op.py
+++ b/python/paddle/fluid/tests/unittests/test_while_op.py
@@ -15,6 +15,7 @@
 from __future__ import print_function

 import unittest
+import paddle
 import paddle.fluid.layers as layers
 from paddle.fluid.executor import Executor
 import paddle.fluid.core as core
diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py
index 1bb13294805ef..2555d24464112 100644
--- a/python/paddle/framework/random.py
+++ b/python/paddle/framework/random.py
@@ -14,28 +14,50 @@

 # TODO: define random api
 import paddle.fluid as fluid
+from paddle.fluid import core

 __all__ = ['manual_seed']


 def manual_seed(seed):
     """
-    :alias_main: paddle.manual_seed
-    :alias: paddle.manual_seed,paddle.framework.random.manual_seed
-    Set global manual seed for program
+    Sets the seed for the global default generator, which manages the random number generation.

     Args:
-        manual_seed(int): random seed for program
+        seed(int): The random seed to set. It is recommended to set a large int number.

     Returns:
-        None.
+        Generator: The global default generator object.

     Examples:
         .. code-block:: python

-            from paddle.framework import manual_seed
-            manual_seed(102)
+            import paddle
+            gen = paddle.manual_seed(102)
+
+    """
+    #TODO(zhiqiu): 1. remove program.random_seed when all random-related ops are upgraded
+    # 2. support gpu generator by global device
+
+    seed = int(seed)
+
+    core.default_cpu_generator()._is_init_py = True
+    return core.default_cpu_generator().manual_seed(seed)
+
+
+def _manual_program_seed(seed):
+    """
+    Sets the global seed for generating random numbers.
+
+    NOTE(zhiqiu): This is the original implementation of manual_seed. Keep it
+    temporarily, since the CUDA generator is not developed yet and the
+    unittests still need it.
+
+    Args:
+        seed(int): The random seed to set. It is recommended to set a large int number.
+
+    Returns:
+        None
     """
     fluid.default_main_program().random_seed = seed
     fluid.default_startup_program().random_seed = seed
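Taken together, the public API and the private shim are meant to be driven like this (a short usage sketch based only on the calls visible in this patch):

    import paddle

    # Seed the new global default CPU generator; the Generator it returns
    # can be kept around for later re-seeding.
    gen = paddle.manual_seed(102)

    # Legacy Program-level seeding, retained privately for unittests until
    # the CUDA generator is available.
    paddle.framework.random._manual_program_seed(102)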
diff --git a/python/paddle/incubate/hapi/tests/test_model.py b/python/paddle/incubate/hapi/tests/test_model.py
index 8e0c051ee8c39..7fc471aa1e2ee 100644
--- a/python/paddle/incubate/hapi/tests/test_model.py
+++ b/python/paddle/incubate/hapi/tests/test_model.py
@@ -22,6 +22,7 @@
 import shutil
 import tempfile

+import paddle
 from paddle import fluid
 from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential, Softmax
 from paddle.fluid.dygraph.base import to_variable
@@ -170,8 +171,8 @@ def setUpClass(cls):
             cls.test_dataset, places=cls.device, batch_size=64)

         seed = 333
-        fluid.default_startup_program().random_seed = seed
-        fluid.default_main_program().random_seed = seed
+        paddle.manual_seed(seed)
+        paddle.framework.random._manual_program_seed(seed)

         dy_lenet = LeNetDygraph()
         cls.init_param = dy_lenet.state_dict()
@@ -222,8 +223,8 @@ def test_prepare_context(self):
     def fit(self, dynamic, num_replicas=None, rank=None):
         fluid.enable_dygraph(self.device) if dynamic else None
         seed = 333
-        fluid.default_startup_program().random_seed = seed
-        fluid.default_main_program().random_seed = seed
+        paddle.manual_seed(seed)
+        paddle.framework.random._manual_program_seed(seed)

         net = LeNet(classifier_activation=None)
         optim_new = fluid.optimizer.Adam(
@@ -327,8 +328,8 @@ def forward(self, x):

 class TestModelFunction(unittest.TestCase):
     def set_seed(self, seed=1024):
-        fluid.default_startup_program().random_seed = seed
-        fluid.default_main_program().random_seed = seed
+        paddle.manual_seed(seed)
+        paddle.framework.random._manual_program_seed(seed)

     def test_train_batch(self, dynamic=True):
         dim = 20
diff --git a/python/paddle/incubate/hapi/tests/test_text.py b/python/paddle/incubate/hapi/tests/test_text.py
index bdc637997b0cb..c4fef0d749ce7 100644
--- a/python/paddle/incubate/hapi/tests/test_text.py
+++ b/python/paddle/incubate/hapi/tests/test_text.py
@@ -20,6 +20,7 @@

 import numpy as np

+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import Embedding, Linear, Layer
 from paddle.fluid.layers import BeamSearchDecoder
@@ -87,15 +88,18 @@ def _calc_output(self, place, mode="test", dygraph=True):
             fluid.enable_dygraph(place)
         else:
             fluid.disable_dygraph()
-        fluid.default_main_program().random_seed = self._random_seed
-        fluid.default_startup_program().random_seed = self._random_seed
-        layer = self.model_cls(**self.attrs) if isinstance(
-            self.attrs, dict) else self.model_cls(*self.attrs)
-        model = Model(layer, inputs=self.make_inputs())
-        model.prepare()
-        if self.param_states:
-            model.load(self.param_states, optim_state=None)
-        return model.test_batch(self.inputs)
+        gen = paddle.manual_seed(self._random_seed)
+        gen._is_init_py = False
+        paddle.framework.random._manual_program_seed(self._random_seed)
+        scope = fluid.core.Scope()
+        with fluid.scope_guard(scope):
+            layer = self.model_cls(**self.attrs) if isinstance(
+                self.attrs, dict) else self.model_cls(*self.attrs)
+            model = Model(layer, inputs=self.make_inputs())
+            model.prepare()
+            if self.param_states:
+                model.load(self.param_states, optim_state=None)
+            return model.test_batch(self.inputs)

     def check_output_with_place(self, place, mode="test"):
         dygraph_output = self._calc_output(place, mode, dygraph=True)
@@ -129,12 +133,9 @@ def setUp(self):

     @staticmethod
     def model_init(model, input_size, hidden_size):
-        model.lstm = RNN(
-            BasicLSTMCell(
-                input_size,
-                hidden_size,
-                param_attr=fluid.ParamAttr(name="lstm_weight"),
-                bias_attr=fluid.ParamAttr(name="lstm_bias")))
+        model.lstm = RNN(BasicLSTMCell(
+            input_size,
+            hidden_size, ))

     @staticmethod
     def model_forward(model, inputs):
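The hapi test changes above all reduce to one idiom: seed the new generator, drop back to the per-op engines, keep the legacy Program seed, and build parameters in a fresh scope so consecutive runs cannot leak state into each other. A condensed sketch of that pattern (deterministic_run and build_and_run are hypothetical names, not part of the patch):

    import paddle
    import paddle.fluid as fluid

    def deterministic_run(build_and_run, seed=333):
        gen = paddle.manual_seed(seed)             # new generator API
        gen._is_init_py = False                    # legacy per-op engines
        paddle.framework.random._manual_program_seed(seed)  # Program seed
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):             # isolate parameters
            return build_and_run()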